Python numpy module: put() example source code

We have collected the following 37 code examples from open-source Python projects to illustrate how to use numpy.put().
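
Before diving into the project examples, here is a minimal standalone sketch (written for this page, not taken from any of the projects below) showing the basic in-place behavior of numpy.put() and its 'clip' mode:

import numpy as np

# Start from a zeroed 1-D array.
arr = np.zeros(5, dtype=int)

# Write 10 and 20 at flat indices 1 and 3; the array is modified in place.
np.put(arr, [1, 3], [10, 20])
print(arr)   # [ 0 10  0 20  0]

# With mode='clip', an out-of-range index is clipped to the last valid position.
np.put(arr, 99, -1, mode='clip')
print(arr)   # [ 0 10  0 20 -1]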

Project: seniority_list    Author: rubydatasystems    | project source | file source
def age_correction(month_nums_array,
                   ages_array,
                   retage):
    '''Long_Form
    Returns a long_form (all months) array of employee ages by
    incrementing starting ages according to month number.
    Note:  Retirement age increases are handled by the build_program_files
    script by incrementing retirement dates and by the clip_ret_ages
    function within the make_skeleton script.
    inputs
        month_nums_array (array)
            gen_month_skeleton function output (ndarray)
        ages_array (array)
            starting_age function output aligned with long_form (ndarray)
            i.e. s_age is starting age (aligned to empkeys)
            repeated each month.
        retage (integer or float)
            output clip upper limit for retirement age
    Output is s_age incremented by a decimal month value according to month_num
    (this is candidate for np.put refactored function)
    '''
    month_val = 1 / 12
    result_array = (month_nums_array * month_val) + ages_array
    result_array = np.clip(result_array, 0, retage)

    return result_array


# FIND CONTRACT PAY YEAR AND RAISE (contract pay year
# and optional raise multiplier)
Project: mcv-m5    Author: david-vazquez    | project source | file source
def bilinear(w, h):
    import math
    data = np.zeros((w*h), dtype=float)
    f = math.ceil(w / 2.)
    c = (2 * f - 1 - f % 2) / (2. * f)
    # print ('f:{}, c:{}'.format(f, c))
    for i in range(w*h):
        x = float(i % w)
        y = float((i / w) % h)
        v = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        # print ('x:{}, y:{}, v:{}'.format(x, y, v))
        np.put(data, i, v)
    data = data.reshape((h, w))
    return data


# Create 4D bilinear interpolation kernel in numpy for even size filters
Project: skp_edu_docker    Author: TensorMSA    | project source | file source
def get_vector(self, item):
        """
        get vector matrix of item
        :param item:
        :return:
        """
        try :
            self.bucket.fill(self.pad)
            if(item == '#') :
                return self.bucket.copy()

            idx = self.get_idx(item)
            if(idx >= 0 and (self.bucket_size > idx)) :
                np.put(self.bucket, idx, 1)
                return self.bucket.copy()
            else :
                return None
        except Exception as e :
            raise Exception ("get vector error !")
Project: keras_zoo    Author: david-vazquez    | project source | file source
def bilinear(w, h):
    import math
    data = np.zeros((w*h), dtype=float)
    f = math.ceil(w / 2.)
    c = (2 * f - 1 - f % 2) / (2. * f)
    # print ('f:{}, c:{}'.format(f, c))
    for i in range(w*h):
        x = float(i % w)
        y = float((i / w) % h)
        v = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        # print ('x:{}, y:{}, v:{}'.format(x, y, v))
        np.put(data, i, v)
    data = data.reshape((h, w))
    return data


# Create 4D bilinear interpolation kernel in numpy for even size filters
Project: radar    Author: amoose136    | project source | file source
def test_put(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        for dt in icodes + fcodes + 'O':
            tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt.reshape(2, 3))

        for dt in '?':
            tgt = np.array([False, True, False, True, False, True], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt.reshape(2, 3))

        # check must be writeable
        a = np.zeros(6)
        a.flags.writeable = False
        assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])

        # when calling np.put, make sure a
        # TypeError is raised if the object
        # isn't an ndarray
        bad_array = [1, 2, 3]
        assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
Project: the-wavenet-pianist    Author: 821760408-sp    | project source | file source
def midi_notes_encoding(audio):
        """
        Compute frame-based midi encoding of audio
        :param audio: 1-D array of audio time series 
        """
        pitches, magnitudes = librosa.piptrack(audio)
        pitches = np.transpose(pitches)
        magnitudes = np.transpose(magnitudes)
        lc = np.zeros((pitches.shape[0], 88), dtype=np.float32)
        for i in range(pitches.shape[0]):
            # Count non-zero entries of pitches
            nz_count = len(np.nonzero(pitches[i])[0])
            # Keep a maximum of 6 detected pitches
            num_ind_to_keep = min(nz_count, 6)
            ind_of_largest_pitches = np.argpartition(
                magnitudes[i], -num_ind_to_keep)[-num_ind_to_keep:] \
                if num_ind_to_keep != 0 else []
            # Convert the largest pitches to midi notes
            overtone_limit = librosa.midi_to_hz(96)[0]
            ind_of_largest_pitches = filter(
                lambda x: pitches[i, x] <= overtone_limit,
                ind_of_largest_pitches)
            midi_notes = librosa.hz_to_midi(pitches[i, ind_of_largest_pitches])
            midi_notes = midi_notes.round()
            # Normalize magnitudes of pitches
            midi_mags = magnitudes[i, ind_of_largest_pitches] / \
                        np.linalg.norm(magnitudes[i, ind_of_largest_pitches], 1)
            np.put(lc[i], midi_notes.astype(np.int64) - [9], midi_mags)
        return lc
Project: seniority_list    Author: rubydatasystems    | project source | file source
def mark_for_furlough(orig_range,
                      fur_range,
                      month,
                      jobs_avail,
                      num_of_job_levels):
    '''Assign fur code to employees when count of jobs is
    less than count of active employees in inverse seniority
    order and assign furloughed job level number.
    note: normally only called during a job change month though it
    will do no harm if called in other months
    inputs
        orig_range
            current month slice of jobs held
        fur_range
            current month slice of fur data
        month
            current month (loop) number
        jobs_avail
            total number of jobs for each month
            array, job_gain_loss_table function output [1]
        num_of_job_levels
            from settings dictionary, used to mark fur job level as
            num_of_job_levels + 1
    '''
    active_count = np.count_nonzero(fur_range == 0)

    excess_job_slots = jobs_avail[month] - active_count

    if excess_job_slots >= 0:
        return

    elif excess_job_slots < 0:

        non_fur_indexes = np.where(fur_range == 0)[0]

        np.put(fur_range,
               non_fur_indexes[excess_job_slots:],
               1)
        np.put(orig_range,
               non_fur_indexes[excess_job_slots:],
               num_of_job_levels + 1)
Project: polara    Author: Evfro    | project source | file source
def _remap_factors(entity_mapping, entity_factors, num_entities, num_factors):
        shape = (num_entities, num_factors)
        entity_id = np.repeat(entity_mapping.loc[:, 1].values, num_factors, axis=0).astype(np.int64)
        factor_id = entity_factors['col2'].values.astype(np.int64)
        entity_factors_idx = np.ravel_multi_index((entity_id, factor_id), dims=shape)
        entity_factors_new = np.zeros(shape)
        np.put(entity_factors_new, entity_factors_idx, entity_factors['col3'].values)
        return entity_factors_new
Project: polara    Author: Evfro    | project source | file source
def _parse_factors(self):
        model_data_path = self.saved_model_path
        model_params = pd.read_csv(model_data_path, skiprows=2, sep=' ',
                        header=None, names=['col1', 'col2', 'col3'])
        num_users = self.data.index.userid.training.new.max() + 1
        num_items = self.data.index.itemid.new.max() + 1

        nu, nf = model_params.iloc[0, :2].astype(np.int64)
        boundary = nu*nf+1
        ni = model_params.iloc[boundary, 0].astype(np.int64)

        users_factors = model_params.iloc[1:boundary, :]

        if model_params.shape[0] == ((nu+ni)*nf + 2): #no biases
            items_biases = None
            items_factors = model_params.iloc[(boundary+1):]
        elif model_params.shape[0] == ((nu+ni)*nf + ni + 3): #has biases
            items_biases = model_params.iloc[(boundary+1):(boundary+1+ni), 0].values
            items_factors = model_params.iloc[(boundary+2+ni):, :]
        else:
            raise NotImplementedError('{} data is not recognized.'.format(model_data_path))

        if self.positive_only:
            user_mapping = pd.read_csv(self.user_mapping_file, sep = '\t', header=None)
            item_mapping = pd.read_csv(self.item_mapping_file, sep = '\t', header=None)

            user_factors_full = self._remap_factors(user_mapping, users_factors, num_users, nf)
            item_factors_full = self._remap_factors(item_mapping, items_factors, num_items, nf)

            if items_biases is not None:
                bias_factors_full = np.zeros(num_items,)
                np.put(bias_factors_full, item_mapping.loc[:, 1].values, items_biases)
                self._items_biases = bias_factors_full
            else:
                self._items_biases = None

            self._users_factors = user_factors_full
            self._items_factors = item_factors_full
        else:
            self._users_factors = users_factors['col3'].values.reshape(nu, nf)
            self._items_factors = items_factors['col3'].values.reshape(ni, nf)
Project: jiveplot    Author: haavee    | project source | file source
def solint_numpy_countbin(dsref):
    start = time.time()
    dsref.as_numarray()

    # get the unique time stamps 
    tms   = numpy.unique(dsref.x)

    # check if there is something to be averaged at all
    if len(tms)==len(dsref.x):
        return time.time() - start

    # "bins" will be the destination bin where the quantity
    # will be summed into for each unique time stamp
    # i.e. all data having time stamp tms[0] will be summed into
    #      bin 0, all data having time stamp tms[x] will be summed
    #      into bin x
    #bins  = range( len(tms) )
    # Now we must transform the array of times (dsref.x) into an
    # array with bin indices
    dests = reduce(lambda acc, (ix, tm): \
                      numpy.put(acc, numpy.where(dsref.x==tm), ix) or acc, \
                   enumerate(tms), \
                   numpy.empty(dsref.x.shape, dtype=numpy.int32))
    # Good, now that we have that ...
    sums  = numpy.bincount(dests, weights=dsref.y)
    count = numpy.bincount(dests)
    dsref.y = sums/count
    dsref.x = tms
    return time.time() - start
Project: resin    Author: kylerbrown    | project source | file source
def taper_matrix(N, W):
    """
    Generates the matrix used for taper calculations.
    """
    N = int(N)
    m = np.zeros((N, N))
    # diagonal
    diag, off_diag = taper_diags(N, W)
    diag_indices = np.arange(0, N) * (N + 1)
    np.put(m, diag_indices, diag)
    np.put(m, (diag_indices[0:-1] + 1), off_diag)
    np.put(m, (diag_indices[1:] - 1), off_diag)
    return m
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_put(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        for dt in icodes + fcodes + 'O':
            tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt.reshape(2, 3))

        for dt in '?':
            tgt = np.array([False, True, False, True, False, True], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt.reshape(2, 3))

        # check must be writeable
        a = np.zeros(6)
        a.flags.writeable = False
        assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])

        # when calling np.put, make sure a
        # TypeError is raised if the object
        # isn't an ndarray
        bad_array = [1, 2, 3]
        assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def plotTrace(data, ids = None):
  """Plot Trajectories color ids"""

  idsall = np.where(ids);
  idsall = [idsall- i for i in np.array(range(kk+1))];
  idsall = np.unique(np.concatenate(idsall));
  np.put(ids, idsall, True);

  plt.figure(32); plt.clf();
  plt.plot(dd[:,0], dd[:,1], color = 'black');
  plt.plot(dd[~ids,0], dd[~ids,1], '.');
  plt.plot(dd[ids, 0], dd[ids, 1], '.', color = 'red')
Project: bareon-allocator    Author: openstack    | project source | file source
def shift(arr, steps, val=0):
    res_arr = np.roll(arr, steps)
    np.put(res_arr, range(steps), val)

    return res_arr
Project: nanoraw    Author: marcus1487    | project source | file source
def write_pvals_and_qvals_wig(
        all_stats, wig_base, write_pvals, write_qvals):
    if VERBOSE: sys.stderr.write('Parsing statistics.\n')
    raw_chrm_strand_stats = defaultdict(list)
    for (pval_f, qval_f, pval, qval, pos, chrm, strand,
         cov1, cov2) in all_stats:
        raw_chrm_strand_stats[(chrm, strand)].append((pos, pval, qval))

    chrm_strand_pvals = {}
    chrm_strand_qvals = {}
    for chrm_strand, stats in raw_chrm_strand_stats.iteritems():
        chrm_poss = zip(*stats)[0]
        raw_chrm_pvals = zip(*stats)[1]
        raw_chrm_qvals = zip(*stats)[2]
        max_pos = max(chrm_poss)

        # arrange and store p-values
        chrm_pvals = np.empty(max_pos + 1)
        chrm_pvals[:] = np.nan
        np.put(chrm_pvals, chrm_poss, raw_chrm_pvals)
        chrm_strand_pvals[chrm_strand] = -np.log10(np.maximum(
            SMALLEST_PVAL, chrm_pvals))

        # arrange and store q-values
        chrm_qvals = np.empty(max_pos + 1)
        chrm_qvals[:] = np.nan
        np.put(chrm_qvals, chrm_poss, raw_chrm_qvals)
        chrm_strand_qvals[chrm_strand] = -np.log10(np.maximum(
            SMALLEST_PVAL, chrm_qvals))

    if VERBOSE: sys.stderr.write('Writing statistics wig(s).\n')
    if write_pvals:
        write_wiggle(wig_base, '', chrm_strand_pvals, 'neg_log10_pvals')
    if write_qvals:
        write_wiggle(wig_base, '', chrm_strand_qvals, 'neg_log10_qvals')

    return
Project: lambda-numba    Author: rlhotovy    | project source | file source
def test_put(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        for dt in icodes + fcodes + 'O':
            tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt.reshape(2, 3))

        for dt in '?':
            tgt = np.array([False, True, False, True, False, True], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt.reshape(2, 3))

        # check must be writeable
        a = np.zeros(6)
        a.flags.writeable = False
        assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])

        # when calling np.put, make sure a
        # TypeError is raised if the object
        # isn't an ndarray
        bad_array = [1, 2, 3]
        assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
Project: multi-task-learning    Author: jg8610    | project source | file source
def _int_to_tag(tag_int, tag_vocab_size):
    # creates the one-hot vector
    a = np.empty(tag_vocab_size)
    a.fill(0)
    np.put(a, tag_int, 1)
    return a
Project: genome-browser    Author: clintval    | project source | file source
def impute_zeros(x, y):
    imputed_y = np.zeros(np.ptp(x) + 1)
    np.put(imputed_y,
           ind=np.array(x) - min(x),
           v=np.array(y),
           mode='clip')
    return imputed_y.tolist()
Project: deliver    Author: orchestor    | project source | file source
def test_put(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        for dt in icodes + fcodes + 'O':
            tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt.reshape(2, 3))

        for dt in '?':
            tgt = np.array([False, True, False, True, False, True], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt.reshape(2, 3))

        # check must be writeable
        a = np.zeros(6)
        a.flags.writeable = False
        assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])

        # when calling np.put, make sure a
        # TypeError is raised if the object
        # isn't an ndarray
        bad_array = [1, 2, 3]
        assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
Project: machine-learning-smartcab    Author: sosegon    | project source | file source
def reduce_state(self, state):
        if state[2] == 'None':
            np.put(state, [2], [None])
        if state[3] == 'None':
            np.put(state, [3], [None])
        if state[0] == 'left':
            if state[1] == 'green':
                if state[2] == 'forward':
                    np.put(state, [3], [None])
                else:
                    np.put(state, [2], [None])
                    np.put(state, [3], [None])
            else: # red
                np.put(state, [2], [None])
                np.put(state, [3], [None])
        elif state[0] == 'right':
            if state[1] == 'green':
                np.put(state, [2], [None])
                np.put(state, [3], [None])
            else: # red
                if state[3] == 'forward':
                    np.put(state, [2], [None])
                elif state[2] == 'left':
                    np.put(state, [3], [None])
                else:
                    np.put(state, [2], [None])
                    np.put(state, [3], [None])
        else: # 'forward'
            np.put(state, [2], [None])
            np.put(state, [3], [None])

    # Position of the given state in the array self.states
Project: tensorflowbook    Author: thewintersun    | project source | file source
def train():
  # learning rate
  learning_rate = 0.01

  x = tf.placeholder(tf.float32)
  y = tf.placeholder(tf.float32)

  net_out = inference(x)

  # loss op: squared error between the network output and the target
  loss = tf.square(net_out - y)

  # minimize the loss with gradient descent
  opt = tf.train.GradientDescentOptimizer(learning_rate)
  train_op = opt.minimize(loss)

  init = tf.global_variables_initializer()

  with tf.Session() as sess:
    sess.run(init)
    print("start traing....")
    for i in range(1000000):
      train_x, train_y = get_train_data()
      sess.run(train_op, feed_dict={x: train_x, y: train_y})

      if i % 10000 == 0:
        times = int(i / 10000)
        test_x_ndarray = np.arange(0, 2 * np.pi, 0.01)
        test_y_ndarray = np.zeros([len(test_x_ndarray)])
        ind = 0
        for test_x in test_x_ndarray:
          test_y = sess.run(net_out, feed_dict={x: test_x, y: 1})
          np.put(test_y_ndarray, ind, test_y)
          ind += 1
        # draw the reference sin curve for comparison,
        # then overlay the network's current approximation of sin
        draw_correct_line()
        pylab.plot(test_x_ndarray,test_y_ndarray,'--', label =  str(times) + 'times' )
        pylab.show()
Project: Parallel-SGD    Author: angadgill    | project source | file source
def _inverse_permutation(p):
    """inverse permutation p"""
    n = p.size
    s = np.zeros(n, dtype=np.int32)
    i = np.arange(n, dtype=np.int32)
    np.put(s, p, i)  # s[p] = i
    return s
Project: Alfred    Author: jkachhadia    | project source | file source
def test_put(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        for dt in icodes + fcodes + 'O':
            tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [1, 3, 5])
            assert_equal(a, tgt.reshape(2, 3))

        for dt in '?':
            tgt = np.array([False, True, False, True, False, True], dtype=dt)

            # test 1-d
            a = np.zeros(6, dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt)

            # test 2-d
            a = np.zeros((2, 3), dtype=dt)
            a.put([1, 3, 5], [True]*3)
            assert_equal(a, tgt.reshape(2, 3))

        # check must be writeable
        a = np.zeros(6)
        a.flags.writeable = False
        assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])

        # when calling np.put, make sure a
        # TypeError is raised if the object
        # isn't an ndarray
        bad_array = [1, 2, 3]
        assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
Project: radar    Author: amoose136    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    try:
        put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))

    return put(ind, v, mode)
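
As a supplement to the docstring above, a small standalone sketch (not part of the radar project source) exercising two behaviors it describes, value repetition when v is shorter than ind and the 'wrap' mode, might look like this:

import numpy as np

# v is shorter than ind, so its values are repeated: 7, 8, 7, 8.
a = np.zeros(6, dtype=int)
np.put(a, [0, 1, 2, 3], [7, 8])
print(a)   # [7 8 7 8 0 0]

# mode='wrap' wraps out-of-range indices around the flattened array.
b = np.arange(5)
np.put(b, 7, -1, mode='wrap')   # index 7 wraps to 7 % 5 == 2
print(b)   # [ 0  1 -1  3  4]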
Project: seniority_list    Author: rubydatasystems    | project source | file source
def sort_eg_attributes(df, attributes=['doh', 'ldate'],
                       reverse_list=[0, 0],
                       add_columns=False):
    '''Sort master list attribute columns by employee group in preparation
    for list construction.  The overall master list structure and order is
    unaffected, only the selected attribute columns are sorted (normally
    date-related columns such as doh or ldate)

    inputs
        df
            The master data dataframe (does not need to be sorted)
        attributes
            columns to sort by eg (inplace)
        reverse_list
            If an attribute is to be sorted in reverse order (descending),
            use a '1' in the list position corresponding to the position of
            the attribute within the attributes input
        add_columns
            If True, an additional column for each sorted attribute will be
            added to the resultant dataframe, with the suffix '_sort' added
            to it.
    '''
    date_cols = []
    for col in df:
        if (df[col]).dtype == 'datetime64[ns]':
            date_cols.append(col)
    try:
        df.sort_values(['eg', 'eg_number'], inplace=True)
    except LookupError:
        df.sort_values(['eg', 'eg_order'], inplace=True)

    egs = df.eg.values
    i = 0
    for measure in attributes:
        data = df[measure].values
        measure_col = np.empty_like(data)
        for eg in pd.unique(df.eg):
            measure_slice = data[egs == eg]
            measure_slice_index = np.where(egs == eg)[0]
            measure_slice_sorted = np.sort(measure_slice, axis=0)

            if reverse_list[i]:
                measure_slice_invert = measure_slice_sorted[::-1]
                measure_slice_sorted = measure_slice_invert
            np.put(measure_col, measure_slice_index, measure_slice_sorted)

        if add_columns:
            col_name = measure + '_sort'
        else:
            col_name = measure

        df[col_name] = measure_col

        if measure in date_cols:
            df[col_name] = pd.to_datetime(df[col_name].dt.date)
        i += 1

    return df
Project: seniority_list    Author: rubydatasystems    | project source | file source
def mark_quantiles(df, quantiles=10):
    '''add a column to the input dataframe identifying quantile membership as
    integers (the column is named "quantile").  The quantile membership
    (category) is calculated for each employee group separately, based on
    the employee population in month zero.

    The output dataframe permits attributes for employees within month zero
    quantile categories to be be analyzed throughout all the months of the
    data model.

    The number of quantiles to create within each employee group is selected
    by the "quantiles" input.

    The function utilizes numpy arrays and functions to compute the quantile
    assignments, and pandas index data alignment feature to assign month zero
    quantile membership to the long-form, multi-month output dataframe.

    This function is used within the quantile_groupby function.

    inputs
        df (dataframe)
            Any pandas dataframe containing an "eg" (employee group) column
        quantiles (integer)
            The number of quantiles to create.

            example:

            If the input is 10, the output dataframe will be a column of
            integers 1 - 10.  The count of each integer will be the same.
            The first quantile members will be marked with a 1, the second
            with 2, etc., through to the last quantile, 10.
    '''
    mult = 1000
    mod = mult / quantiles
    aligned_df = df.copy()
    df = df[df.mnum == 0][['eg']].copy()
    eg_arr = df.eg.values
    bins_arr = np.zeros_like(eg_arr)
    unique_egs = np.arange(eg_arr.max()) + 1
    for eg in unique_egs:
        eg_count = eg_arr[eg_arr == eg].size
        this_eg_arr = np.clip((np.arange(eg_count) + 1) / eg_count, 0, .9999)
        this_bin_arr = (this_eg_arr * mult // mod).astype(int) + 1
        np.put(bins_arr, np.where(eg_arr == eg)[0], this_bin_arr)

    df['quantile'] = bins_arr
    aligned_df['quantile'] = df['quantile']
    return aligned_df
Project: seniority_list    Author: rubydatasystems    | project source | file source
def make_delayed_job_counts(imp_month,
                            delayed_jnums,
                            lower,
                            upper):
    '''Make an array of job counts to be inserted into the long_form job counts
    array of the job assignment function.  The main assignment function calls
    this function prior to the implementation month. The array output of this
    function is inserted into what will become the job count column.
    These jobs are from the standalone job results.
    The job count column displays a total monthly count of the job in the
    corresponding jnum (job number) column.
    inputs
        imp_month (integer)
            implementation month, defined by settings dictionary
        delayed_jnums (numpy array)
            array of job numbers, normally data from the start of the model
            through the implementation month
        lower (numpy array)
            array of indexes marking the beginning of data for each month
            within a larger array of stacked, multi-month data
        upper (numpy array)
            array of indexes marking the end of data for each month
    '''
    imp_high = upper[imp_month]
    stand_job_counts = np.zeros(imp_high)
    job_numbers = sorted(list(set(delayed_jnums[:imp_high])))

    for month in range(imp_month + 1):
        low = lower[month]
        high = upper[month]
        jnums_range = delayed_jnums[low:high]
        stand_range = stand_job_counts[low:high]

        for job in job_numbers:
            job_indexes = np.where(jnums_range == job)[0]
            np.put(stand_range,
                   job_indexes,
                   job_indexes.size)

    return stand_job_counts


# MAKE GAIN_LOSS_TABLE
Project: ngraph    Author: NervanaSystems    | project source | file source
def __init__(self, data_arrays, time_steps, batch_size, nfeatures,
                 total_iterations=None):
        self.batch_size = batch_size
        self.time_steps = time_steps
        self.nfeatures = nfeatures
        self.index = 0

        # make sure input is in dict format
        if isinstance(data_arrays, dict):
            self.data_arrays = dict(data_arrays)
        else:
            raise ValueError("Must provide dict as input")

        # number of examples
        self.ndata = len(self.data_arrays['inp_txt'])

        # number of examples (as integer multiple of batch size)
        self.ndata = self.ndata // (self.batch_size) * self.batch_size

        self.nbatches = self.ndata // self.batch_size

        if self.ndata < self.batch_size:
            raise ValueError('Number of examples is smaller than the batch size')

        self.total_iterations = self.nbatches if total_iterations is None else total_iterations

        # reshape array for batch and batch size dimensions
        self.data_arrays['inp_txt'] = \
            self.data_arrays['inp_txt'][:self.ndata][:][:].reshape(
            self.batch_size,
            self.nbatches,
            self.time_steps,
            self.nfeatures)

        self.data_arrays['tgt_txt'] = \
            self.data_arrays['tgt_txt'][:self.ndata][:].reshape(
            self.batch_size,
            self.nbatches,
            self.time_steps)

        self.data_arrays['teacher_tgt'] = \
            self.data_arrays['teacher_tgt'][:self.ndata][:][:].reshape(
            self.batch_size,
            self.nbatches,
            self.time_steps,
            self.nfeatures)

        # Teacher Forcing
        self.data_arrays['teacher_tgt'] = np.roll(self.data_arrays['teacher_tgt'], shift=1, axis=2)
        # put a start token (0, 0) as the first decoder input
        for i in range(self.batch_size):
            for j in range(self.nbatches):
                for k in range(self.nfeatures):
                    np.put(self.data_arrays['teacher_tgt'][i][j][0], [k], [0])
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    try:
        put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))

    return put(ind, v, mode)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    return a.put(ind, v, mode)
Project: aws-lambda-numpy    Author: vitolimandibhrata    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    return a.put(ind, v, mode)
Project: satpy    Author: pytroll    | project source | file source
def get_dataset(self, key, info=None, out=None, xslice=None, yslice=None):
        """Load a dataset
        """
        if key in self.cache:
            return self.cache[key]
        # Type dictionary
        typedict = {"af": "flash_accumulation",
                    "afa": "accumulated_flash_area",
                    "afr": "flash_radiance",
                    "lgr": "radiance",
                    "lef": "radiance",
                    "lfl": "radiance"}

        # Get lightning data out of NetCDF container
        logger.debug("Key: {}".format(key.name))
        # Create reference grid
        grid = np.full((self.nlines, self.ncols), np.NaN)
        # Set slices to full disc extent
        if xslice is None:
            xslice = slice(0, self.ncols, None)
        if yslice is None:
            yslice = slice(0, self.nlines, None)
        logger.debug("Slices - x: {}, y: {}".format(xslice, yslice))
        # Get product values
        values = self.nc[typedict[key.name]]
        rows = self.nc['row']
        cols = self.nc['column']
        logger.debug('[ Number of values ] : {}'.format((len(values))))
        logger.debug('[Min/Max] : <{}> / <{}>'.format(np.min(values),
                                                      np.max(values)))
        # Convert xy coordinates to flatten indices
        ids = np.ravel_multi_index([rows, cols], grid.shape)
        # Replace NaN values with data
        np.put(grid, ids, values)

        # Correct for bottom left origin in LI row/column indices.
        rotgrid = np.flipud(grid)
        logger.debug('Data shape: {}, {}'.format(yslice, xslice))
        # Rotate the grid by 90 degree clockwise
        rotgrid = np.rot90(rotgrid, 3)
        logger.warning("LI data has been rotated to fit to reference grid. \
                        Works only for test dataset")
        # Slice the gridded lighting data
        slicegrid = rotgrid[yslice, xslice]
        # Mask invalid values
        ds = np.ma.masked_where(np.isnan(slicegrid), slicegrid)
        # Create dataset object
        out.data[:] = np.ma.getdata(ds)
        out.mask[:] = np.ma.getmask(ds)
        out.info.update(key.to_dict())

        return(out)
Project: AlphaToe    Author: DanielSlater    | project source | file source
def get_stochastic_network_move(session, input_layer, output_layer, board_state, side,
                                valid_only=False, game_spec=None):
    """Choose a move for the given board_state using a stocastic policy. A move is selected using the values from the
     output_layer as a categorical probability distribution to select a single move

    Args:
        session (tf.Session): Session used to run this network
        input_layer (tf.Placeholder): Placeholder to the network used to feed in the board_state
        output_layer (tf.Tensor): Tensor that will output the probabilities of the moves, we expect this to be of
            dimesensions (None, board_squares) and the sum of values across the board_squares to be 1.
        board_state: The board_state we want to get the move for.
        side: The side that is making the move.

    Returns:
        (np.array) It's shape is (board_squares), and it is a 1 hot encoding for the move the network has chosen.
    """
    np_board_state = np.array(board_state)
    if side == -1:
        np_board_state = -np_board_state

    np_board_state = np_board_state.reshape(1, *input_layer.get_shape().as_list()[1:])
    probability_of_actions = session.run(output_layer,
                                         feed_dict={input_layer: np_board_state})[0]

    if valid_only:
        available_moves = list(game_spec.available_moves(board_state))
        if len(available_moves) == 1:
            move = np.zeros(game_spec.board_squares())
            np.put(move, game_spec.tuple_move_to_flat(available_moves[0]), 1)
            return move
        available_moves_flat = [game_spec.tuple_move_to_flat(x) for x in available_moves]
        for i in range(game_spec.board_squares()):
            if i not in available_moves_flat:
                probability_of_actions[i] = 0.

        prob_mag = sum(probability_of_actions)
        if prob_mag != 0.:
            probability_of_actions /= sum(probability_of_actions)

    try:
        move = np.random.multinomial(1, probability_of_actions)
    except ValueError:
        # sometimes because of rounding errors we end up with probability_of_actions summing to greater than 1.
        # so need to reduce slightly to be a valid value
        move = np.random.multinomial(1, probability_of_actions / (1. + 1e-6))

    return move
Project: lambda-numba    Author: rlhotovy    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    try:
        put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))

    return put(ind, v, mode)
Project: deliver    Author: orchestor    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    try:
        put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))

    return put(ind, v, mode=mode)
Project: RibbaPi    Author: stahlfabrik    | project source | file source
def handle(self):
        data = self.request[0].strip()
        data_length = len(data)
        # check packet start byte 0x9C
        if not data_length >= 8 and data[0] == 0x9c:
            return
        packet_type = data[1]
        frame_size = (data[2] << 8) + data[3]
        # check consistency of length and proper frame ending
        if not (data_length - 7 == frame_size) and data[-1] == 0x36:
            return

        packet_number = data[4]
        number_of_packets = data[5]

        if packet_type == 0xDA:  # data frame
            # tell ribbapi that tpm2_net data is received
            self.server.ribbapi.receiving_data.set()
            self.server.update_time()

            if packet_number == 0:
                self.server.misbehaving = True
            if packet_number == (1 if not self.server.misbehaving else 0):
                self.server.tmp_buffer_index = 0

            upper = min(self.server.tmp_buffer.size,
                        self.server.tmp_buffer_index + frame_size)
            arange = np.arange(self.server.tmp_buffer_index,
                               upper)
            np.put(self.server.tmp_buffer, arange, list(data[6:-1]))
            self.server.tmp_buffer_index = self.server.tmp_buffer_index + frame_size
            if packet_number == (number_of_packets if not self.server.misbehaving else number_of_packets - 1):
                if not self.server.ribbapi.current_animation:
                    self.server.ribbapi.frame_queue.put(self.server.tmp_buffer.copy())
        elif data[1] == 0xC0:  # command
            # NOT IMPLEMENTED
            return
        elif data[1] == 0xAA:  # request response
            # NOT IMPLEMENTED
            return
        else:  # no valid tmp2 packet type
            return
Project: Alfred    Author: jkachhadia    | project source | file source
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array. `put` is roughly
    equivalent to:

    ::

        a.flat[ind] = v

    Parameters
    ----------
    a : ndarray
        Target array.
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter than
        `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        'clip' mode means that all indices that are too large are replaced
        by the index that addresses the last element along that axis. Note
        that this disables indexing with negative numbers.

    See Also
    --------
    putmask, place

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44,   1, -55,   3,   4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0,  1,  2,  3, -5])

    """
    try:
        put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))

    return put(ind, v, mode)