Python numpy module, asscalar() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.asscalar().

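Before the project examples, a minimal sketch of what numpy.asscalar() does: it unwraps a size-1 array (or a NumPy scalar) into the equivalent plain Python scalar. Note that asscalar() was deprecated in NumPy 1.16 and removed in 1.23; ndarray.item() is the direct replacement.

import numpy as np

a = np.array([3.14])
x = np.asscalar(a)   # -> 3.14 as a plain Python float
print(type(x))       # <class 'float'>

# asscalar() simply calls .item(); on modern NumPy use item() directly:
y = a.item()         # also 3.14 as a plain Python float
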
Project: Interactivity    Author: treeoftenere
def _send_sparseOutput(self, output, timestamp, name):
        for out in self._sparseOutput_threads:
            if isinstance(out, str):  # LSL outlet
                raise NotImplementedError
            else:  # OSC output stream
                if USE_LIBLO:
                    if np.array(output).size == 1:
                        new_output = [('f', np.asscalar(output))]
                    else:
                        new_output = [('f', x) for x in output[:]]
                    message = Message('/{}'.format(name), *new_output)
                    #send(out, Bundle(timestamp, message))
                    send(out, message)
                else:
                    raise NotImplementedError
            if self.verbose:
                print('sparseOutput: {}'.format(output))
Project: bayestsa    Author: thalesians
def predict(self):
        try:
            X, Wm, Wc = sigmaPoints(self.xa, self.Pa)
        except:
            warnings.warn('Encountered a matrix that is not positive definite in the sigma points calculation at the predict step')
            self.Pa = nearpd(self.Pa)
            X, Wm, Wc = sigmaPoints(self.xa, self.Pa)
        fX, x, Pxx = unscentedTransform(X, Wm, Wc, self.fa)
        x = np.asscalar(x)
        Pxx = np.asscalar(Pxx)

        Pxv = 0.
        N = np.shape(X)[1]
        for j in range(0, N):
            Pxv += Wc[j] * fX[0,j] * X[3,j]

        self.xa = np.array( ((x,), (0.,), (0.,), (0.,)) )
        self.Pa = np.array( ((Pxx, Pxv   , 0.      , 0.      ),
                             (Pxv, self.R, 0.      , 0.      ),
                             (0. , 0.    , self.Q  , self.cor),
                             (0. , 0.    , self.cor, self.R  )) )
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def Saliency_map(image,model,preprocess,ground_truth,use_gpu=False,method=util.GradType.GUIDED):
    vis_param_dict['method'] = method
    img_tensor = preprocess(image)
    img_tensor.unsqueeze_(0)
    if use_gpu:
        img_tensor=img_tensor.cuda()
    input = Variable(img_tensor,requires_grad=True)

    if  input.grad is not None:
        input.grad.data.zero_()

    model.zero_grad()
    output = model(input)
    ind=torch.LongTensor(1)
    if(isinstance(ground_truth,np.int64)):
        ground_truth=np.asscalar(ground_truth)
    ind[0]=ground_truth
    ind=Variable(ind)
    energy=output[0,ground_truth]
    energy.backward() 
    grad=input.grad
    if use_gpu:
        return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
    return np.abs(grad.data.numpy()[0]).max(axis=0)
Project: nelpy    Author: nelpy
def mean(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        means = np.mean(self.ratemap, axis=axis).squeeze()
        if means.size == 1:
            return np.asscalar(means)
        return means
Project: nelpy    Author: nelpy
def max(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        maxes = np.max(self.ratemap, axis=axis).squeeze()
        if maxes.size == 1:
            return np.asscalar(maxes)
        return maxes
Project: nelpy    Author: nelpy
def min(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        mins = np.min(self.ratemap, axis=axis).squeeze()
        if mins.size == 1:
            return np.asscalar(mins)
        return mins
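The three nelpy reductions above (mean, max, min) share one pattern: reduce, squeeze(), then unwrap with asscalar() only when a single value remains. A minimal sketch of that pattern on a hypothetical (units x positions) rate map:

import numpy as np

ratemap = np.random.rand(3, 10)  # hypothetical rate map: 3 units x 10 positions

global_mean = np.mean(ratemap, axis=None).squeeze()
if global_mean.size == 1:
    global_mean = np.asscalar(global_mean)  # size-1 result -> plain float

per_unit = np.mean(ratemap, axis=1).squeeze()  # 3 values -> stays an ndarray
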
Project: malmo-challenge    Author: Kaixhin
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
Project: malmo-challenge    Author: Microsoft
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
Project: nupic-history-server    Author: htm-community
def _conjurePotentialPools(self, **kwargs):
    # These only need to be fetched from the SP once.
    if self._potentialPools:
      return self._potentialPools
    sp = self._sp
    out = []
    for colIndex in range(0, sp.getNumColumns()):
      columnPool = self._getZeroedInput()
      columnPoolIndices = []
      sp.getPotential(colIndex, columnPool)
      for i, pool in enumerate(columnPool):
        if np.asscalar(pool) == 1.0:
          columnPoolIndices.append(i)
      out.append(columnPoolIndices)
    self._potentialPools = out
    return out
Project: MagicWand    Author: GianlucaSilvestri
def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
    """
    Calculates the Delta E (CIE1994) of two colors.

    K_1:
      0.045 graphic arts
      0.048 textiles
    K_2:
      0.015 graphic arts
      0.014 textiles
    K_L:
      1 default
      2 textiles
    """

    color1_vector = _get_lab_color1_vector(color1)
    color2_matrix = _get_lab_color2_matrix(color2)
    delta_e = color_diff_matrix.delta_e_cie1994(
        color1_vector, color2_matrix, K_L=K_L, K_C=K_C, K_H=K_H, K_1=K_1, K_2=K_2)[0]
    return numpy.asscalar(delta_e)


Project: mushroom    Author: carloderamo
def get_value(self, *args, **kwargs):
        if len(args) == 2:
            gradient = args[0]
            nat_gradient = args[1]
            tmp = np.asscalar(gradient.dot(nat_gradient))
            lambda_v = np.sqrt(tmp / (4. * self._eps))
            # For numerical stability
            lambda_v = max(lambda_v, 1e-8)
            step_length = 1. / (2. * lambda_v)

            return step_length
        elif len(args) == 1:
            return self.get_value(args[0], args[0], **kwargs)
        else:
            raise ValueError('Adaptive parameters need gradient or gradient '
                             'and natural gradient')
Project: tensorflow-rl    Author: steveKapturowski
def choose_next_action(self, state):
        network_output_v, network_output_pi, action_repeat_probs = self.session.run(
            [
                self.local_network.output_layer_v,
                self.local_network.output_layer_pi,
                self.local_network.action_repeat_probs,
            ],
            feed_dict={
                self.local_network.input_ph: [state],
            })

        network_output_pi = network_output_pi.reshape(-1)
        network_output_v = np.asscalar(network_output_v)

        action_index = self.sample_policy_action(network_output_pi)
        new_action = np.zeros([self.num_actions])
        new_action[action_index] = 1

        action_repeat = 1 + self.sample_policy_action(action_repeat_probs[0])

        return new_action, network_output_v, network_output_pi, action_repeat
Project: AnswerClassify    Author: kenluck2001
def enspredict (Xval, indices):
    '''
        blend models using majority voting scheme
    '''
    totalLabelist = []
    for ind in range(len(Xval)):
        labelist = []
        for model in featureSelectModel:
            label = model.predict(Xval[:, indices][ind].reshape(1, -1))
            labelist.append(np.asscalar(label))

        for model in wholeFeatureModel:
            label = model.predict(Xval[ind].reshape(1, -1))
            labelist.append(np.asscalar(label))

        votedLabel = max(set(labelist), key=labelist.count)
        totalLabelist.append(votedLabel)

    return totalLabelist
Project: lexdecomp    Author: mcrisc
def question_batches(data_file):
    """Iterates over a dataset returning batches composed by a single question
    and its candidate answers.

    :data_file: a HDF5 file object holding the dataset
    :returns: a DataSet namedtuple of arrays (questions, sentences, labels).
    """
    n_questions = np.asscalar(data_file['metadata/questions/count'][...])
    questions_ds = data_file['data/questions']
    sentences_ds = data_file['data/sentences']

    for i in range(n_questions):
        row_labels = data_file['data/labels/q%d' % i][...]
        labels = row_labels[:, 1]
        rows = row_labels[:, 0]
        questions = questions_ds[rows, ...]
        sentences = sentences_ds[rows, ...]
        yield DataSet(questions, sentences, labels)
Project: optimize-stencil    Author: Ablinne
def _optimize_single(self, x0):
        x0 = list(x0)

        if x0[0] is None:
            x0[0] = 0
            dt_ok = np.asscalar(self.dispersion.dt_ok(x0))
            if dt_ok < 0:
                # Initial conditions violate constraints, reject
                return x0, None, float('inf')

            x0[0] = dt_ok
            x0[0] = min(x0[0], self.dtmax)
            x0[0] = max(x0[0], self.dtmin)

        x0 = np.asfarray(x0)

        stencil_ok = self.dispersion.stencil_ok(x0)
        if stencil_ok < 0:
            # Initial conditions violate constraints, reject
            return x0, None, float('inf')

        res = scop.minimize(self.dispersion.norm, x0, method='SLSQP', constraints = self.constraints, options = dict(disp=False, iprint = 2))
        norm = self.dispersion_high.norm(res.x)

        return x0, res, norm
Project: redmapper    Author: erykoff
def zindex(self,z):
        """
        redshift index lookup

        parameters
        ----------
        z: array of floats

        returns
        -------
        indices: array of integers
            redshift indices

        """
        # return the z index/indices with rounding.

        zind = np.searchsorted(self.zinteger,np.round(np.atleast_1d(z)*self.zbinscale).astype(np.int64))
        if (zind.size == 1):
            return np.asscalar(zind)
        else:
            return zind

        # and check for top overflows.  Minimum is always 0
        #test,=np.where(zind == self.z.size)
        #if (test.size > 0): zind[test] = self.z.size-1
Project: redmapper    Author: erykoff
def refmagindex(self,refmag):
        """
        reference magnitude index lookup

        parameters
        ----------
        refmag: array of floats

        returns
        -------
        indices: array of integers
            refmag indices
        """
        # return the refmag index/indices with rounding

        refmagind = np.searchsorted(self.refmaginteger,np.round(np.atleast_1d(refmag)*self.refmagbinscale).astype(np.int64))
        if (refmagind.size == 1):
            return np.asscalar(refmagind)
        else:
            return refmagind
Project: redmapper    Author: erykoff
def lumrefmagindex(self,lumrefmag):
        """
        luminosity reference magnitude index lookup

        parameters
        ----------
        lumrefmag: array of floats

        returns
        -------
        indices: array of integers
            lumrefmag indices

        """

        lumrefmagind = np.searchsorted(self.lumrefmaginteger,np.round(np.atleast_1d(lumrefmag)*self.refmagbinscale).astype(np.int64))
        if (lumrefmagind.size == 1):
            return np.asscalar(lumrefmagind)
        else:
            return lumrefmagind
Project: DistributedES    Author: ShangtongZhang
def __call__(self, o_):
        if np.isscalar(o_):
            o = torch.FloatTensor([o_])
        else:
            o = torch.FloatTensor(o_)
        self.online_stats.feed(o)
        if self.offline_stats.n[0] == 0:
            return o_
        std = (self.offline_stats.v + 1e-6) ** .5
        o = (o - self.offline_stats.m) / std
        o = o.numpy()
        if np.isscalar(o_):
            o = np.asscalar(o)
        else:
            o = o.reshape(o_.shape)
        return o
Project: opentrack-prototyping    Author: DaMichel
def calculateQ(self, k):
      Q = M(self.Q(k))
      R = M(self.kf.R(k))
      H = M(self.kf.H(k))
      D = M(self.D[k-1])

      alpha = np.trace(D - R) / np.trace(H * M(self.kf.Pminus[k-1]) * H.T)
      alpha = np.asscalar(alpha)
      if np.isfinite(alpha) and alpha>0:
          alpha = np.power(alpha, self.exponent)
          alpha = max(0.0001, min(alpha, 1000.*mt.trace(R) / mt.trace(Q)))
      else:
          alpha = 0.0001
      Q = Q * alpha
      self.alpha[k] = alpha
      return Q
Project: DeepRL    Author: ShangtongZhang
def __call__(self, o_):
        if np.isscalar(o_):
            o = torch.FloatTensor([o_])
        else:
            o = torch.FloatTensor(o_)
        self.online_stats.feed(o)
        if self.offline_stats.n[0] == 0:
            return o_
        std = (self.offline_stats.v + 1e-6) ** .5
        o = (o - self.offline_stats.m) / std
        o = o.numpy()
        if np.isscalar(o_):
            o = np.asscalar(o)
        else:
            o = o.reshape(o_.shape)
        return o
Project: Crossprop    Author: ShangtongZhang
def learn(self, target, epoch=None):
        BasicLearner.learn(self, target, epoch)
        error = np.asscalar(target - self.y)

        self.beta += self.theta * error * self.h * np.asarray(self.phi).flatten()
        self.alpha = np.exp(self.beta)

        self.W += np.matrix(self.alpha * error * np.asarray(self.phi).flatten()).T
        self.U += self.stepSize * self.m

        phi_2 = np.asarray(np.power(self.phi, 2)).flatten()
        m_decay = 1 - self.theta * np.power(self.h, 2) * phi_2
        m_delta = error * self.h * np.asarray(self.X.T * self.gradientAct(self.phi, self.net))
        self.m = m_decay * self.m + self.theta * m_delta

        h_decay = 1 - self.alpha * phi_2
        h_delta = error * self.alpha * np.asarray(self.phi).flatten()
        self.h = h_decay * self.h + h_delta

        return 0.5 * error * error

# TODO: refactor classification learner
Project: regionmask    Author: mathause
def _dcoord(coord):
    """determine the spacing of a coordinate"""

    coord = np.array(coord)

    if coord.ndim > 1:
        msg = 'Only 1D coordinates are supported'
        raise AssertionError(msg)

    dcoord = np.unique(np.round(np.diff(coord), 4))

    # irregularly spaced
    if dcoord.size > 1:
        dcoord_str = 'irr'
    # regularly spaced
    else:
        dcoord_str = '{:0.2f}'.format(np.asscalar(dcoord))

    return dcoord_str
Project: malmo-challenge    Author: rhaps0dy
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
Project: irwin    Author: clarkerubber
def buildPlayerGameActivationsTable(self, model=None):
    if model is None:
      print("using default model")
      model = self.narrowGameModel.model()
    print("getting players")
    engines = self.env.playerDB.byEngine(True)
    legits = self.env.playerDB.byEngine(False)

    print("got " + str(len(engines + legits)) + " players")

    playerGameActivations = []

    for player in engines + legits:
      print("predicting " + player.id)
      gs = GameAnalysisStore.new()
      gs.addGameAnalyses(self.env.gameAnalysisDB.byUserId(player.id))
      predictions = self.predict(gs.gameAnalysisTensors(), model)
      playerGameActivations.append(PlayerGameActivations(player.id, player.engine, [int(100*np.asscalar(p[0][0][0])) for p in predictions]))

    print("writing to DB")
    self.env.playerGameActivationsDB.lazyWriteMany(playerGameActivations)
Project: pyculib    Author: numba
def rotg(self, a, b):
        '''Compute the Givens rotation matrix given a column vector (a, b).
        Returns r, z, c, s.

        r: r = sqrt(a ** 2 + b ** 2).

        z: Used to recover c and s.

        if abs(z) < 1:
            c, s = sqrt(1 - z ** 2), z
        elif abs(z) == 1:
            c, s = 0, 1
        else:
            c, s = 1 / z, sqrt(1 - 1 / z ** 2)

        c: Cosine element of the rotation matrix.

        s: Sine element of the rotation matrix.
        '''
        a, b = np.asarray(a), np.asarray(b)
        _sentry_same_dtype(a, b)
        fn = self._dispatch(self.rotg.vtable, a.dtype)
        return fn(np.asscalar(a), np.asscalar(b))
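For context, rotg computes a Givens rotation: the returned c and s rotate the vector (a, b) onto (r, 0). A pure-NumPy check of that identity (illustrative only, not pyculib-specific):

import numpy as np

a, b = 3.0, 4.0
r = np.hypot(a, b)           # sqrt(a**2 + b**2) = 5.0
c, s = a / r, b / r          # cosine and sine of the rotation angle

G = np.array([[c, s], [-s, c]])
print(G @ np.array([a, b]))  # [5. 0.] -- the second component is zeroed
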
Project: rnnlab    Author: phueb
def calc_ba_data(self, probe_simmat, multi_probe_list):
        # make thr range
        probe_simmat_mean = np.asscalar(np.mean(probe_simmat))
        thr1 = max(0.0, round(min(0.9, round(probe_simmat_mean, 2)) - 0.1, 2))  # don't change
        thr2 = round(thr1 + 0.2, 2)
        # use bayes optimization to find best_thr
        if SaverConfigs.PRINT_BAYES_OPT:
            print('Finding best thresholds between {} and {} using bayesian-optimization...'.format(thr1, thr2))
        gp_params = {"alpha": 1e-5, "n_restarts_optimizer": 2}
        func_to_be_opt = partial(self.calc_probe_ba_list, probe_simmat, multi_probe_list, True)
        bo = BayesianOptimization(func_to_be_opt, {'thr': (thr1, thr2)}, verbose=SaverConfigs.PRINT_BAYES_OPT)
        bo.explore({'thr': [probe_simmat_mean]})
        bo.maximize(init_points=2, n_iter=SaverConfigs.NUM_BAYES_STEPS,
                    acq="poi", xi=0.001, **gp_params)  # smaller xi: exploitation
        best_thr = bo.res['max']['max_params']['thr']
        # calc probe_ba_list with best_thr
        probe_ba_list = self.calc_probe_ba_list(probe_simmat, multi_probe_list, False, best_thr)
        probe_ba_list = np.multiply(probe_ba_list, 100).tolist()
        # make avg_probe_ba_list
        avg_probe_ba_list = pd.DataFrame(
            data={'probe': multi_probe_list,
                  'probe_ba': probe_ba_list}).groupby('probe').mean()['probe_ba'].values.tolist()
        return probe_ba_list, avg_probe_ba_list
Project: rnnlab    Author: phueb
def make_phrase_pps(self,
                        terms):
        print('Making phrase_pps...')
        terms = ['PERIOD'] + terms  # to get pp value for very first term
        num_terms = len(terms)
        task_id = 0
        pps = []
        for n in range(num_terms):
            term_ids = [self.terms.item_id_dict[term] for term in
                        terms[:n + 2]]  # add two to compensate for 0-index and y
            window_mat = np.asarray(term_ids)[:, np.newaxis][-self.bptt_steps:].T
            x, y = np.split(window_mat, [-1], axis=1)
            x2 = np.tile(np.eye(GlobalConfigs.NUM_TASKS)[task_id], [1, x.shape[1], 1])
            feed_dict = {self.graph.x: x, self.graph.x2: x2, self.graph.y: y}
            pp = np.asscalar(self.sess.run(self.graph.pps, feed_dict=feed_dict))
            pps.append(pp)
        pps = pps[:-1]  # compensate for PERIOD insertion
        return pps
Project: latenttrees    Author: kaltwang
def init_distrib_idx(self, distrib, idx=None):
        assert isinstance(distrib, DistribGauss)
        x = distrib.get_mu()
        if idx is None:
            # initialize prior and thus average over all cases
            mu = np.nanmean(x, axis=0, keepdims=True)
        else:
            # select cases idx
            mu = x[idx, :]
            idx_nan = np.isnan(mu)
            if np.any(idx_nan):
                # we need to randomly select new values for all NaNs
                idx_good = np.ones_like(idx, dtype=bool)
                idx_good[idx, :] = False
                idx_good[np.isnan(x)] = False
                x_good = x[idx_good, :]
                num_nan = np.count_nonzero(idx_nan)
                mu[idx_nan] = np.random.choice(x_good, num_nan, replace=False)
            mu = np.copy(mu)  # make sure to not overwrite data

        std = np.empty_like(mu)
        std.fill(np.asscalar(np.nanstd(x)))
        self.init_data(mu, std)
Project: gym-sandbox    Author: suqi
def _police_move_by_continous_angle(self, police_list, police_actions):
        # Accept continous move action, which is more suitable for MADDPG
        # move angle (0~2pi)
        police_actions = np.clip(police_actions, 0, 2*np.pi)

        police_new_loc = police_list.copy()
        police_speed = self.teams['police']['speed']
        for _i, _a in enumerate(police_actions):
            _a = np.asscalar(_a)  # transform array to scalar
            action_dir = np.array([np.cos(_a), np.sin(_a)])
            police_dir = action_dir * police_speed
            _p = police_list[_i]
            _p = (_p[0] + police_dir[0], _p[1] + police_dir[1])
            _p = self.ensure_inside(_p)
            police_new_loc[_i] = _p

        return police_new_loc
Project: cellranger    Author: 10XGenomics
def load_h5_namedtuple(group, namedtuple):
    """ Load a single namedtuple from an h5 group """
    args = []
    for field in namedtuple._fields:
        try:
            field_value = getattr(group, field).read()
            if field_value.shape == ():
                field_value = np.asscalar(field_value)
        except tables.NoSuchNodeError:
            try:
                field_value = getattr(group._v_attrs, field)
            except AttributeError:
                field_value = None
        args.append(field_value)
    return namedtuple(*args)
Project: cellranger    Author: 10XGenomics
def json_sanitize(data):
    # This really doesn't make me happy. How many cases do we have to test?
    if (type(data) == float) or (type(data) == numpy.float64):
        # Handle floats specially
        if math.isnan(data):
            return "NaN";
        if (data ==  float("+Inf")):
            return "inf"
        if (data == float("-Inf")):
            return "-inf"
        return data
    elif hasattr(data, 'iterkeys'):
        # Dictionary case
        new_data = {}
        for k in data.keys():
            new_data[k] = json_sanitize(data[k])
        return new_data
    elif hasattr(data, '__iter__'):
        # Anything else that looks like a list.
        new_data = []
        for d in data:
            new_data.append(json_sanitize(d))
        return new_data
    elif hasattr(data, 'shape') and data.shape == ():
        # Numpy 0-d array
        return np.asscalar(data)
    else:
        return data
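A hypothetical call exercising the branches above (assumes the function's numpy/np imports are in scope; the dict branch relies on Python 2's iterkeys()):

raw = {
    'loss': float('nan'),             # NaN float -> the string "NaN"
    'score': np.array(0.97),          # 0-d array -> plain float via np.asscalar
    'counts': [np.float64(1.0), float('+inf')],  # list walked recursively
}
print(json_sanitize(raw))
# {'loss': 'NaN', 'score': 0.97, 'counts': [1.0, 'inf']}
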
Project: SpicePy    Author: giaccone
def sin(Vo, Va, Freq=None, Td=0, Df=0, Phase=0, t=None):
    """
    SIN provides a damped sinusoidal waveform in the form
    Vo + Va * np.sin(2 * np.pi * Freq * t + Phase * (np.pi / 180))

    The waveform is:
        * t < Td ==> Vo + Va * np.sin(Phase * (np.pi / 180))
        * t > Td ==> Vo + Va * np.sin(2 * np.pi * Freq * (t - Td) + Phase * (np.pi / 180)) * np.exp(-(t - Td) * Df)

    :param Vo: offset
    :param Va: amplitude (peak) of the waveform
    :param Freq: frequency (Hz)
    :param Td: delay time (s)
    :param Df: damping factor (1/s)
    :param Phase: voltage phase (deg)
    :param t: array with times where the function has to be evaluated
    :return: the function values at times defined in t
    """

    # check presence of time array
    if t is None:
        raise TypeError('Missing time array')

    # check if t is scalar
    if isinstance(t, (int, float)):
        t = np.array([t])

    # check presence of Freq
    if Freq is None:
        Freq = 1 / t[-1]

    out = np.zeros_like(t)
    out[t <= Td] = Vo + Va * np.sin(Phase * (np.pi / 180))
    out[t > Td] = Vo + Va * np.sin(2 * np.pi * Freq * (t[t > Td] - Td) + Phase * (np.pi / 180)) * np.exp(-(t[t > Td] - Td) * Df)

    # if input is scalar convert out to scalar too
    if out.size == 1:
        out = np.asscalar(out)

    return out
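A hypothetical evaluation of this source: a 1 kHz, 1 V-peak sinusoid with a 0.5 V offset, over an array of times and at a single scalar time:

import numpy as np

t = np.linspace(0, 2e-3, 5)                # 0 .. 2 ms
v = sin(Vo=0.5, Va=1.0, Freq=1e3, t=t)     # ndarray of waveform values
v0 = sin(Vo=0.5, Va=1.0, Freq=1e3, t=0.0)  # scalar in -> plain float out (0.5)
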
Project: xarray-simlab    Author: benbovy
def validate(self):
        if np.asscalar(self.x_size.value) is None:
            self.x_size.value = 5
Project: NeoAnalysis    Author: neoanalysis
def __read_str(self, numchars=1, utf=None):
        """
        Read a string of a specific length.

        This is compatible with python 2 and python 3.
        """
        rawstr = np.asscalar(np.fromfile(self._fsrc,
                                         dtype='S%s' % numchars, count=1))
        if utf or (utf is None and PY_VER == 3):
            return rawstr.decode('utf-8')
        return rawstr
Project: NeoAnalysis    Author: neoanalysis
def __read_comment(self):
        """
        Read a single comment.

        The comment is stored as an Event in Segment 0, which is
        specifically for comments.

        ----------------------
        Returns an empty list.

        The returned object is already added to the Block.

        No ID number: always called from another method
        """

        # float64 -- timestamp (number of days since dec 30th 1899)
        time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # int16 -- length of next string
        numchars1 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- the one who sent the comment
        sender = self.__read_str(numchars1)

        # int16 -- length of next string
        numchars2 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- comment text
        text = self.__read_str(numchars2, utf=False)

        comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                        sender=sender, file_origin=self._file_origin)

        self._seg0.events.append(comment)

        return []
Project: NeoAnalysis    Author: neoanalysis
def __read_str(self, numchars=1, utf=None):
        """
        Read a string of a specific length.

        This is compatible with python 2 and python 3.
        """
        rawstr = np.asscalar(np.fromfile(self._fsrc,
                                         dtype='S%s' % numchars, count=1))
        if utf or (utf is None and PY_VER == 3):
            return rawstr.decode('utf-8')
        return rawstr
Project: NeoAnalysis    Author: neoanalysis
def __read_comment(self):
        """
        Read a single comment.

        The comment is stored as an Event in Segment 0, which is
        specifically for comments.

        ----------------------
        Returns an empty list.

        The returned object is already added to the Block.

        No ID number: always called from another method
        """

        # float64 -- timestamp (number of days since dec 30th 1899)
        time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # int16 -- length of next string
        numchars1 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- the one who sent the comment
        sender = self.__read_str(numchars1)

        # int16 -- length of next string
        numchars2 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- comment text
        text = self.__read_str(numchars2, utf=False)

        comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                        sender=sender, file_origin=self._file_origin)

        self._seg0.events.append(comment)

        return []
Project: bayestsa    Author: thalesians
def observe(self, y):
        self.lastobservation = y

        xa = self.xa[0:2:1,0:1:1]
        Pa = self.Pa[0:2:1,0:2:1]
        try:
            X, Wm, Wc = sigmaPoints(xa, Pa)
        except:
            warnings.warn('Encountered a matrix that is not positive definite in the sigma points calculation at the observe step')
            Pa = nearpd(Pa)
            X, Wm, Wc = sigmaPoints(xa, Pa)
        hX, self.predictedobservation, Pyy = \
                unscentedTransform(X, Wm, Wc, self.ha)
        self.predictedobservation = np.asscalar(self.predictedobservation)
        Pyy = np.asscalar(Pyy)
        self.innovcov = Pyy

        x = self.xa[0,0]
        Pxy = 0.
        Pvy = 0.
        M = np.shape(X)[1]
        for j in range(0, M):
            haImage = self.ha(X[:,j])
            Pxy += Wc[j] * (X[0,j] - x) * (haImage - self.predictedobservation)
            Pvy += Wc[j] * X[1,j] * haImage

        Pa = np.array( ((Pxy,), (Pvy,), (0.,), (0.,)) )
        K = Pa * (1./Pyy)
        self.gain = K[0,0]

        self.innov = y - self.predictedobservation        

        self.xa += K * self.innov
        self.Pa -= np.dot(K, Pa.T)

        self.loglikelihood += UnscentedKalmanFilter.MINUS_HALF_LN_2PI - .5 * (np.log(self.innovcov) + self.innov * self.innov / self.innovcov)
Project: muxrplot    Author: sirmo
def format_time(ts):
    res = []
    for each in ts:
        res.append(std_time.strftime("%H:%M.%S", std_time.localtime(np.asscalar(np.int32(each)))))
    return res
Project: muxrplot    Author: sirmo
def get_date_range(df, timestamp_colkey):
    max_time = df[timestamp_colkey].max()
    min_time = df[timestamp_colkey].min()
    t_to = std_time.strftime("%d-%b-%Y", std_time.localtime(np.asscalar(np.int32(max_time))))
    t_from = std_time.strftime("%d-%b-%Y", std_time.localtime(np.asscalar(np.int32(min_time))))
    if t_to == t_from:
        return t_to
    return "{} - {}".format(t_from, t_to)
Project: Wasserstein-Learning-For-Point-Process    Author: xiaoshuai09
def generate_samples_marked(intensity, T, n):
    U = intensity.dim
    Sequences = []
    inds = np.arange(U)
    for i in range(n):
        seq = []
        t = 0
        while True:
            intens1 = intensity.getUpperBound(t,T,inds)
            #print(intens1)
            dt = np.random.exponential(1/sum(intens1))
            #print(dt)
            new_t = t + dt
            #print(new_t)
            if new_t > T:
                break
            intens2 = intensity.getValue(new_t, inds)
            #print(intens2)
            u = np.random.uniform()
            if sum(intens2)/sum(intens1) > u:
                #print(intens2)
                x_sum = sum(intens2)
                norm_i = [ x/x_sum for x in intens2]
                #print(norm_i)
                dim = np.nonzero(np.random.multinomial(1, norm_i))
                seq.append([new_t, np.asscalar(dim[0])])
            t = new_t
        if len(seq)>1:
            Sequences.append(seq) 
    return Sequences
Project: Interactivity    Author: treeoftenere
def _send_output(self, output, timestamp, name):
        """Send pipeline outputs through the LSL or OSC stream.
        NOT PER CHANNEL
        Args:
            output (scalar): output of the pipeline
            timestamp (float): timestamp
        """
        for out in self._output_threads:
            if isinstance(out, str):  # LSL outlet
                raise NotImplementedError
                # self._outlet.push_sample([output], timestamp=timestamp)

            else:  # OSC output stream
                if USE_LIBLO:
                    if np.array(output).size == 1:
                        new_output = [('f', np.asscalar(output))]
                    else:
                        new_output = [('f', x) for x in output[:]]
                    message = Message('/{}'.format(name), *new_output)
 #                   send(out, Bundle(timestamp, message))
                    send(out, message)
                else:
                    raise NotImplementedError
#                    self._client.send_message('/{}'.format(name), output[:])
            if self.verbose:
                print('Output: {}'.format(output))
Project: solar-correlation-map    Author: Zapf-Consulting
def label_to_idx(labels, label):
    center_idx_bool = labels == label
    return np.asscalar(np.where(center_idx_bool)[0]), center_idx_bool
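A hypothetical usage, assuming the label occurs exactly once (np.asscalar raises if the match has more than one element):

import numpy as np

labels = np.array(['sun', 'earth', 'moon'])
idx, mask = label_to_idx(labels, 'earth')
print(idx)   # 1 -- a plain Python int via np.asscalar
print(mask)  # [False  True False]
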
Project: unsupervised-2017-cvprw    Author: imatge-upc
def _process_video(filename, coder):
    """
    Process a single video file using FFmpeg
    Args
        filename: path to the video file
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
        video_buffer: numpy array with the video frames
        mask_buffer: activity mask of the video frames
        frame_h: integer, video height in pixels.
        frame_w: integer, video width in pixels.
        seq_length: sequence length (non-zero frames)
    """

    video, raw_h, raw_w, seq_length = coder.decode_video(filename)
    video = video.astype(np.uint8)
    assert len(video.shape) == 4
    assert video.shape[3] == 3
    frame_h, frame_w = video.shape[1], video.shape[2]

    # generate mask from annotations
    groups = filename.split('/')
    annot_file_name = groups[-1].split('.')[0] + '.xgtf'
    annot_file_path = os.path.join(FLAGS.annotation_directory, groups[-2], annot_file_name)
    parsed_bbx = _parse_annotation_xml(annot_file_path)
    if FLAGS.resize_h != -1:
        parsed_bbx = _resize_bbx(parsed_bbx, raw_h, raw_w)
    masks = _bbx_to_mask(parsed_bbx, seq_length, FLAGS.resize_h, FLAGS.resize_w)

    encoded_frames_seq = []
    encoded_masks_seq  = []
    for idx in range(seq_length):
        encoded_frames_seq.append(coder.encode_frame(video[idx, :, :, :]))
        encoded_masks_seq.append(coder.encode_mask(masks[idx, :, :, :]))

    return encoded_frames_seq, encoded_masks_seq, frame_h, frame_w, np.asscalar(seq_length)
Project: srep    Author: Answeror
def _get_trial(root, combo):
    path = get_path(root, combo)
    mat = spio.loadmat(path)
    data = mat['data'].astype(np.float32)
    gesture = np.repeat(label_to_gesture(np.asscalar(mat['label'].astype(np.int))), len(data))
    subject = np.repeat(np.asscalar(mat['subject'].astype(np.int)), len(data))
    return Trial(data=data, gesture=gesture, subject=subject)
Project: ngraph    Author: NervanaSystems
def assign_scalar(message, value):
    """
    Adds the appropriate scalar type of value to the protobuf message
    """
    if value is None:
        message.null_val = True
    elif isinstance(value, np.generic):
        assign_scalar(message, np.asscalar(value))
    elif isinstance(value, (str, six.text_type)):
        message.string_val = value
    elif isinstance(value, np.dtype):
        message.dtype_val = dtype_to_protobuf(value)
    elif isinstance(value, float):
        message.double_val = value
    elif isinstance(value, bool):
        message.bool_val = value
    elif isinstance(value, six.integer_types):
        message.int_val = value
    elif isinstance(value, slice):
        slice_val = ops_pb.Slice()
        if value.start is not None:
            slice_val.start.value = value.start
        if value.step is not None:
            slice_val.step.value = value.step
        if value.stop is not None:
            slice_val.stop.value = value.stop
        message.slice_val.CopyFrom(slice_val)
    elif isinstance(value, dict):
        for key in value:
            assign_scalar(message.map_val.map[key], value[key])
        # This encodes an empty dict for deserialization
        assign_scalar(message.map_val.map['_ngraph_map_sentinel_'], '')
    elif isinstance(value, Axis):
        message.axis.CopyFrom(axis_to_protobuf(value))
    elif isinstance(value, AxesMap):
        message.axes_map.CopyFrom(axes_map_to_protobuf(value))
    else:
        raise unhandled_scalar_value(value)
Project: OnPLS    Author: tomlof
def f(self, X, W):
        """Function value.
        """
        X_ = list(X)
        n = len(X_)
        K = W[0].shape[1]  # The number of components
        f = 0.0
        for k in range(K):
            for i in range(n):
                wik = W[i][:, [k]]
                for j in range(n):
                    if self.pred_comp[i][j] > 0:
                        wjk = W[j][:, [k]]

                        ti = np.dot(X_[i], wik)
                        tj = np.dot(X_[j], wjk)

                        f += np.asscalar(np.dot(ti.T, tj))

            # Deflate for next component
            if k < K - 1:  # Do not deflate for last component
                for i in range(n):
                    wi = W[i][:, k]
                    ti = np.dot(X_[i], wi)
                    titi = np.asscalar(np.dot(ti.T, ti))
                    if titi > consts.TOLERANCE:
                        pi = np.dot(X_[i].T, ti) / titi

                        X_[i] = X_[i] - np.dot(ti, pi.T)  # Deflate
                    # else:
                    #     pi = np.zeros_like(wi)

        return f
Project: AND4NMF    Author: PrincetonML
def compute_error(A_in, Ag_in):
    A = A_in
    Ag = Ag_in

    # realign
    D = A.shape[1]
    inner = np.zeros((D, D))
    for i in range(D):
        for j in range(D):
            inner[i, j] = np.asscalar(A[:, i].transpose() * Ag[:, j] )/(norm(A[:, i]) * norm(Ag[:, j]))

    max_idx = np.argmax(inner, axis=0)
    P = np.asmatrix(np.zeros((D, D)))
    for i in range(D):
        P[i, max_idx[i]] = 1

    # print "normalize the rows of A and A^*"
    inv_norm_A = np.asarray(1.0 / np.apply_along_axis(norm, 0, A))
    A = A * np.diag(inv_norm_A)
    inv_norm_Ag = np.asarray(1.0 / np.apply_along_axis(norm, 0, Ag))
    Ag = Ag * np.diag(inv_norm_Ag)

    u = np.asmatrix(np.ones((1, D)))
    #for each A_i^* we try to find the A_i that is closest to A_i^*
    error = 0
    for i in range(D):
        Ag_i = Ag[:, i]
        inner_product = np.asmatrix(Ag_i.transpose() * A)
        norm_A = np.asmatrix(np.diag(A.transpose() * A))
        z = np.divide(inner_product, norm_A).transpose()
        z = np.asarray(z).flatten().transpose()
        scalar = np.diag(z)
        As = A * scalar
        diff = np.apply_along_axis(norm, 0, As - Ag_i * u)
        # min_idx = np.argmin(diff)
        # print 'for Ag_%d: A_%d' % (i, min_idx)
        difmin = np.amin(diff)
        difmin = difmin * difmin
        error = error + difmin

    return error
Project: nelpy    Author: nelpy
def mean(self,*,axis=1):
        """Returns the mean of each signal in AnalogSignalArray."""
        try:
            means = np.mean(self._ydata, axis=axis).squeeze()
            if means.size == 1:
                return np.asscalar(means)
            return means
        except IndexError:
            raise IndexError("Empty AnalogSignalArray cannot calculate mean")