Python numpy module: zeros() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use numpy.zeros().

Project: YellowFin_Pytorch    Author: JianGoForIt
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    # Write each token index into the row/column for its (pair, position).
    for idx1, pair in enumerate(mini_batch):
        for idx2, token in enumerate(pair[0]):
            main_matrix1[idx1, idx2] = token
    for idx1, pair in enumerate(mini_batch):
        for idx2, token in enumerate(pair[1]):
            main_matrix2[idx1, idx2] = token
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
    return [main_matrix1_t, main_matrix2_t]

Project: rca-evaluation    Author: sieve-microservices
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
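
A quick sanity check (hypothetical usage, assuming numpy is imported as np): unlike np.roll, positions shifted in from the edge are zero-filled instead of wrapped around.

a = np.array([1, 2, 3, 4, 5])
print(np.roll(a, 2))        # [4 5 1 2 3] -- wraps around
print(roll_zeropad(a, 2))   # [0 0 1 2 3] -- zero-pads instead
print(roll_zeropad(a, -2))  # [3 4 5 0 0]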
Project: Modeling_Preparation    Author: Yangruipis
def _generate_data():
    """
    Generate sample data.

    The commented-out variant computes the output y(k) from the inputs
    u(k-1) and y(k-1); the active code simply returns y = sin(u1) + u2.
    """
    # u = np.random.uniform(-1,1,200)
    # y=[]
    # former_y_value = 0
    # for i in np.arange(0,200):
    #     y.append(former_y_value)
    #     next_y_value = (29.0 / 40) * np.sin(
    #         (16.0 * u[i] + 8 * former_y_value) / (3.0 + 4.0 * (u[i] ** 2) + 4 * (former_y_value ** 2))) \
    #                    + (2.0 / 10) * u[i] + (2.0 / 10) * former_y_value
    #     former_y_value = next_y_value
    # return u,y
    u1 = np.random.uniform(-np.pi, np.pi, 200)
    u2 = np.random.uniform(-1, 1, 200)
    y = np.zeros(200)
    for i in range(200):
        y[i] = np.sin(u1[i]) + u2[i]
    return u1, u2, y
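
A minimal check of the generator's output (assuming numpy is imported as np):

u1, u2, y = _generate_data()
print(u1.shape, u2.shape, y.shape)       # (200,) (200,) (200,)
print(np.allclose(y, np.sin(u1) + u2))   # True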
Project: AutoML5    Author: djajetic
def bac_metric (solution, prediction, task='binary.classification'):
    ''' Compute the normalized balanced accuracy. The binarization and 
    the normalization differ for the multi-label and multi-class case. '''
    label_num = solution.shape[1]
    score = np.zeros(label_num)
    bin_prediction = binarize_predictions(prediction, task)
    [tn,fp,tp,fn] = acc_stat(solution, bin_prediction)
    # Bounding to avoid division by 0
    eps = 1e-15
    tp = sp.maximum (eps, tp)
    pos_num = sp.maximum (eps, tp+fn)
    tpr = tp / pos_num # true positive rate (sensitivity)
    if (task != 'multiclass.classification') or (label_num==1):
        tn = sp.maximum (eps, tn)
        neg_num = sp.maximum (eps, tn+fp)
        tnr = tn / neg_num # true negative rate (specificity)
        bac = 0.5*(tpr + tnr)
        base_bac = 0.5     # random predictions for binary case
    else: 
        bac = tpr
        base_bac = 1./label_num # random predictions for multiclass case
    bac = mvmean(bac)     # average over all classes
    # Normalize: 0 for random, 1 for perfect
    score = (bac - base_bac) / sp.maximum(eps, (1 - base_bac))
    return score
Project: AutoML5    Author: djajetic
def replace_missing(X):
    # This is ugly, but sparse CSR matrices pass through untouched, while
    # dense arrays (which have no getformat()) raise and get their NaNs
    # replaced by a sentinel value instead.
    try:
        if X.getformat() == 'csr':
            return X
    except AttributeError:
        X[np.isnan(X)] = -999.0  # djajetic 05.09.2015
        return X  # djajetic 05.09.2015

    # Original implementation, superseded by the sentinel replacement above:
    # p = len(X)
    # nn = len(X[0]) * 2
    # XX = np.zeros([p, nn])
    # for i in range(len(X)):
    #     line = X[i]
    #     line1 = [0 if np.isnan(x) else x for x in line]
    #     line2 = [1 if np.isnan(x) else 0 for x in line]  # indicator of missingness
    #     XX[i] = line1 + line2
    # return XX
Project: cnn-graph-classification    Author: giannisnik
def compute_nystrom(ds_name, use_node_labels, embedding_dim, community_detection_method, kernels):
    if ds_name=="SYNTHETIC":
        graphs, labels = generate_synthetic()
    else:
        graphs, labels = load_data(ds_name, use_node_labels)
    communities, subgraphs = compute_communities(graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q=[]
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
Project: mpiFFT4py    Author: spectralDNS
def test_FFT2(FFT2):
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)
    else:
        A = zeros(N, dtype=FFT2.float)

    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0,1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def load_data(from_file, input_words, grammar, max_length):
    inputs = []
    input_lengths = []
    parses = []
    labels = []
    label_lengths = []
    with open(from_file, 'r') as data:
        for line in data:
            split = line.strip().split('\t')
            if len(split) == 4:
                _, sentence, canonical, parse = split
            else:
                _, sentence, canonical = split
                parse = None
            input, in_len = vectorize(sentence, input_words, max_length, add_eos=False)
            inputs.append(input)
            input_lengths.append(in_len)
            label, label_len = grammar.vectorize_program(canonical, max_length)
            labels.append(label)
            label_lengths.append(label_len)
            if parse is not None:
                parses.append(vectorize_constituency_parse(parse, max_length, in_len))
            else:
                parses.append(np.zeros((2*max_length-1,), dtype=bool))
    return inputs, input_lengths, parses, labels, label_lengths
Project: uwb_tracker_ros    Author: eth-ait
def _read_unit_offsets(self):
        if not rospy.has_param('~num_of_units'):
            rospy.logwarn("No unit offset parameters found!")
        num_of_units = rospy.get_param('~num_of_units', 0)
        self._unit_offsets = np.zeros((num_of_units, 3))
        self._unit_coefficients = np.zeros((num_of_units, 2))
        for i in range(num_of_units):
            unit_params = rospy.get_param('~unit_{}'.format(i))
            x = unit_params['x']
            y = unit_params['y']
            z = unit_params['z']
            self._unit_offsets[i, :] = [x, y, z]
            p0 = unit_params['p0']
            p1 = unit_params['p1']
            self._unit_coefficients[i, :] = [p0, p1]
        rospy.loginfo("Unit offsets: {}".format(self._unit_offsets))
        rospy.loginfo("Unit coefficients: {}".format(self._unit_coefficients))
Project: facerecognition    Author: guoxiaolu
def put_images_on_grid(images, shape=(16,8)):
    nrof_images = images.shape[0]
    img_size = images.shape[1]
    bw = 3
    img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
    for i in range(shape[1]):
        x_start = i*(img_size+bw)+bw
        for j in range(shape[0]):
            img_index = i*shape[0]+j
            if img_index>=nrof_images:
                break
            y_start = j*(img_size+bw)+bw
            img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
        if img_index>=nrof_images:
            break
    return img
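
The output dimensions follow from the tile size plus the 3-pixel border. A hypothetical call on a random batch of 32x32 RGB images:

images = np.random.rand(100, 32, 32, 3).astype(np.float32)
grid_img = put_images_on_grid(images, shape=(16, 8))
print(grid_img.shape)  # (283, 563, 3) = (8*(32+3)+3, 16*(32+3)+3, 3)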
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def encode_and_store(batch_x, output_dir, file_name):
    """
    Args:
        1. batch_x:         Batch of 32*32 images which will go inside our autoencoder.
        2. output_dir:      Dir path for storing all encoded features for given `batch_x`.
                            Features will be stored in the form of JSON file.
        3. file_name:       File name of JSON file.
    """
    global AUTO_ENCODER
    if AUTO_ENCODER is None:
        load_AE()

    norm_batch = np.zeros(batch_x.shape)
    for i in range(len(batch_x)):
        norm_batch[i] = (batch_x[i] - np.mean(batch_x[i])) / np.std(batch_x[i])

    output_dict = {
        'name' : file_name,
        'encoded': AUTO_ENCODER.transform(norm_batch).tolist()}

    with open(output_dir+file_name+'.json', 'w') as f:
        json.dump(output_dict, f)
Project: pyfds    Author: emtpb
def material_vector(self, mat_parameter):
        """Get a vector that contains the specified material parameter for every point of the
        field.

        Args:
            mat_parameter: Material parameter of interest.

        Returns:
            Vector which contains the specified material parameter for each point in the field.
        """

        param_found = False
        mat_vector = np.zeros(self.num_points)

        for mat_reg in self.material_regions:
            for mat in mat_reg.materials:
                if hasattr(mat, mat_parameter):
                    mat_vector[mat_reg.region.indices] = getattr(mat, mat_parameter)
                    param_found = True

        if not param_found:
            wn.warn('Material parameter {} not found in set materials. Returning zeros.'
                    .format(mat_parameter), stacklevel=2)

        return mat_vector
Project: fxnn    Author: khaotik
def compile(self,s_inputs_, s_loss_, v_params_, s_grads_=None, s_reg_=0, fetches_=None, updates_=None, givens_=None, trunc_grad_=None, profile_=False):
        def get_shared_shape(v):
            return v.get_value(borrow=True, return_internal_type=True).shape
        if type(s_inputs_) not in (list, tuple):
            s_inputs_ = [s_inputs_]
        if isinstance(updates_, dict):
            updates_= list(updates_.items())
        super(AdamSGD,self).compile(
            s_inputs_, s_loss_, v_params_, s_reg_=s_reg_, s_grads_=s_grads_, trunc_grad_=trunc_grad_)
        self.v_m = [th.shared(value=np.zeros(get_shared_shape(p), th.config.floatX), name='adam_m_'+p.name if p.name is not None else None) for p in v_params_]
        self.v_v = [th.shared(value=np.zeros(get_shared_shape(p), th.config.floatX), name='adam_v_'+p.name if p.name is not None else None) for p in v_params_]
        s_b1 = T.scalar('adam_b1'); s_b2 = T.scalar('adam_b2')
        s_b1s = T.scalar('adam_b1s'); s_b2s = T.scalar('adam_b2s')
        update_m = [(m, (m*s_b1 + (1.-s_b1)*g)) for m,g in zip(self.v_m,self.s_grads)]
        update_v = [(v, (v*s_b2 + (1.-s_b2)*g*g)) for v,g in zip(self.v_v,self.s_grads)]
        apply_grad = [(p, p-(s_b1s*m*self.s_lr)/(T.sqrt(s_b2s*v)+self.eps)) for p,m,v in zip(v_params_,self.v_m,self.v_v)]
        self.fn_train = th.function(
            inputs=[self.s_lr]+s_inputs_+[s_b1,s_b2,s_b1s,s_b2s],
            outputs=fetches_,
            updates=update_m+update_v+apply_grad+(updates_ if updates_ else []),
            on_unused_input='warn',
            givens=givens_, profile=profile_)
        self.fn_rst = th.function(inputs=[], updates=[(v, T.zeros_like(v)) for v in self.v_m+self.v_v], profile=profile_)
        return self.fn_train
Project: rca-evaluation    Author: sieve-microservices
def silhouette_score(series, clusters):
    distances = np.zeros((series.shape[0], series.shape[0]))
    for idx_a, metric_a in enumerate(series):
        for idx_b, metric_b in enumerate(series):
            distances[idx_a, idx_b] = _sbd(metric_a, metric_b)[0]
    labels = np.zeros(series.shape[0])
    for i, (cluster, indices) in enumerate(clusters):
        for index in indices:
            labels[index] = i

    # The silhouette score is only defined if there are at least two
    # clusters, each with at least one assignment.
    if len(np.unique(labels)) == 1 or (len(np.unique(labels)) >= distances.shape[0]):
        return labels, -1
    else:
        return labels, _silhouette_score(distances, labels, metric='precomputed')
Project: treecat    Author: posterior
def make_complete_graph(num_vertices):
    """Constructs a complete graph.

    The pairing function is: k = v1 + v2 * (v2 - 1) // 2

    Args:
        num_vertices: Number of vertices.

    Returns:
        grid: A 3 x K array of (edge, vertex, vertex) triples, where
            K = V * (V - 1) // 2 is the number of edges.
    """
    V = num_vertices
    K = V * (V - 1) // 2
    grid = np.zeros([3, K], np.int32)
    k = 0
    for v2 in range(V):
        for v1 in range(v2):
            grid[:, k] = [k, v1, v2]
            k += 1
    return grid
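
For example, four vertices yield K = 6 edges, enumerated in the pairing order described in the docstring (a minimal check):

grid = make_complete_graph(4)
print(grid)
# [[0 1 2 3 4 5]
#  [0 0 1 0 1 2]
#  [1 2 2 3 3 3]]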
Project: treecat    Author: posterior
def make_tree(edges):
    """Constructs a tree graph from a set of (vertex,vertex) pairs.

    Args:
        edges: A list or set of unordered (vertex, vertex) pairs.

    Returns:
        grid: A 3 x E array of (edge, vertex, vertex) triples, where
            E = len(edges) and the vertices of each edge are sorted.
    """
    assert all(isinstance(edge, tuple) for edge in edges)
    edges = [tuple(sorted(edge)) for edge in edges]
    edges.sort()
    E = len(edges)
    grid = np.zeros([3, E], np.int32)
    for e, (v1, v2) in enumerate(edges):
        grid[:, e] = [e, v1, v2]
    return grid
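
Edges may be passed in any order and orientation; they are normalized and sorted (a minimal check):

grid = make_tree({(2, 0), (0, 1), (1, 3)})
print(grid)
# [[0 1 2]
#  [0 0 1]
#  [1 2 3]]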
Project: treecat    Author: posterior
def jit_remove_edge(grid, e2k, neighbors, components, e):
    """Remove an edge from a spanning tree."""
    k = e2k[e]
    v1, v2 = grid[1:3, k]
    jit_set_remove(neighbors[v1], v2)
    jit_set_remove(neighbors[v2], v1)
    stack = np.zeros(neighbors.shape[0], np.int16)
    jit_set_add(stack, v1)
    while stack[0]:
        v1 = jit_set_pop(stack)
        components[v1] = True
        for i in range(neighbors[v1, 0]):
            v2 = neighbors[v1, i + 1]
            if not components[v2]:
                jit_set_add(stack, v2)
    return k
Project: treecat    Author: posterior
def triangular_to_square(grid, triangle):
    """Convert a packed triangular matrix to a square matrix.

    Args:
        grid: A 3 x K array as returned by make_complete_graph().
        triangle: A length-K array.

    Returns:
        A square symmetric V x V array with zero on the diagonal.
    """
    K = len(triangle)
    assert grid.shape == (3, K)
    V = int(round(0.5 + (0.25 + 2 * K)**0.5))
    assert K == V * (V - 1) // 2
    square = np.zeros([V, V], dtype=triangle.dtype)
    square[grid[1, :], grid[2, :]] = triangle
    square[grid[2, :], grid[1, :]] = triangle
    return square
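
Combined with make_complete_graph above, this unpacks a per-edge vector into a symmetric matrix (a minimal check, assuming numpy is imported as np):

grid = make_complete_graph(3)  # 3 vertices, K = 3 edges
print(triangular_to_square(grid, np.array([0.1, 0.2, 0.3])))
# [[0.  0.1 0.2]
#  [0.1 0.  0.3]
#  [0.2 0.3 0. ]]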
Project: treecat    Author: posterior
def generate_model_file(num_rows, num_cols, num_cats=4, rate=1.0):
    """Generate a random model.

    Returns:
        The path to a gzipped pickled model.
    """
    path = os.path.join(DATA, '{}-{}-{}-{:0.1f}.model.pkz'.format(
        num_rows, num_cols, num_cats, rate))
    V = num_cols
    K = V * (V - 1) // 2
    if os.path.exists(path):
        return path
    print('Generating {}'.format(path))
    if not os.path.exists(DATA):
        os.makedirs(DATA)
    dataset_path = generate_dataset_file(num_rows, num_cols, num_cats, rate)
    dataset = pickle_load(dataset_path)
    table = dataset['table']
    tree_prior = np.zeros(K, dtype=np.float32)
    config = make_config(learning_init_epochs=5)
    model = train_model(table, tree_prior, config)
    pickle_dump(model, path)
    return path
Project: treecat    Author: posterior
def quantize_from_probs2(probs, resolution):
    """Quantize multiple non-normalized probs to given resolution.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.
        resolution: The integer number of quanta to distribute in each row.

    Returns:
        An [N, M]-shaped array of quantized probabilities such that
        np.all(result.sum(axis=1) == resolution).
    """
    assert len(probs.shape) == 2
    N, M = probs.shape
    probs = probs / probs.sum(axis=1, keepdims=True)
    result = np.zeros(probs.shape, np.int8)
    range_N = np.arange(N, dtype=np.int32)
    for _ in range(resolution):
        sample = probs.argmax(axis=1)
        result[range_N, sample] += 1
        probs[range_N, sample] -= 1.0 / resolution
    return result
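
With exact binary fractions the greedy rounding is deterministic (a minimal check, assuming numpy is imported as np):

probs = np.array([[0.125, 0.625, 0.25]])
print(quantize_from_probs2(probs, resolution=4))
# [[1 2 1]]  -- each row sums to the resolution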
Project: treecat    Author: posterior
def make_ragged_index(columns):
    """Make an index to hold data in a ragged array.

    Args:
        columns: A list of [N, _]-shaped numpy arrays of varying size, where
            N is the number of rows.

    Returns:
        A [len(columns)+1]-shaped array of begin,end positions of each column.
    """
    ragged_index = np.zeros([len(columns) + 1], dtype=np.int32)
    ragged_index[0] = 0
    for v, column in enumerate(columns):
        ragged_index[v + 1] = ragged_index[v] + column.shape[-1]
    ragged_index.flags.writeable = False
    return ragged_index
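
For example, three columns of widths 2, 3, and 1 give cumulative begin/end offsets (a minimal check, assuming numpy is imported as np):

columns = [np.zeros((5, 2)), np.zeros((5, 3)), np.zeros((5, 1))]
print(make_ragged_index(columns))  # [0 2 5 6]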
Project: treecat    Author: posterior
def count_observations(ragged_index, data):
    """Count the observations in each cell of a ragged data array.

    Args:
        ragged_index: A [V+1]-shaped numpy array as returned by
            make_ragged_index.
        data: A [N, R]-shaped ragged array of multinomial count data, where
            N is the number of rows and R = ragged_index[-1].

    Returns:
        A [N, V]-shaped array whose entries are the number of observations
        in each cell of data.
    """
    N, R = data.shape
    assert R == ragged_index[-1]
    V = len(ragged_index) - 1
    counts = np.zeros([N, V], np.int8)
    for v in range(V):
        beg, end = ragged_index[v:v + 2]
        counts[:, v] = data[:, beg:end].sum(axis=1)
    return counts
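
Together with make_ragged_index above, this collapses ragged count data into per-column totals (a minimal check, assuming numpy is imported as np):

ragged_index = np.array([0, 2, 5], dtype=np.int32)
data = np.array([[1, 0, 2, 0, 1],
                 [0, 1, 0, 3, 0]], dtype=np.int8)
print(count_observations(ragged_index, data))
# [[1 3]
#  [1 3]]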
Project: spyking-circus    Author: spyking-circus
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        local_chunk = numpy.zeros((self.nb_channels, local_shape), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        count = 0

        for s in data_slice:
            t_slice = len(s)//self.nb_channels
            local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s)//self.nb_channels)
            count += t_slice

        local_chunk = local_chunk.T
        self._close()

        if nodes is not None:
            if not numpy.all(nodes == numpy.arange(self.nb_channels)):
                local_chunk = numpy.take(local_chunk, nodes, axis=1)

        return self._scale_data_to_float32(local_chunk)
Project: spyking-circus    Author: spyking-circus
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        if nodes is None:
            nodes = numpy.arange(self.nb_channels)

        local_chunk = numpy.zeros((local_shape, len(nodes)), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        for count, i in enumerate(nodes):
            local_chunk[:, count] = self.data[i][data_slice]
        self._close()

        return self._scale_data_to_float32(local_chunk)
Project: inductive-pooling    Author: HUJI-Deep
def corrupt_image(img, MAR_prob=0, min_rects=0, max_rects=0, min_width=0, max_width=0):
    new_img = img.copy()
    mask = np.zeros(img.shape[0:2], dtype=np.bool)
    if MAR_prob > 0:
        mask[(random_sample(mask.shape) < MAR_prob)] = True
    if max_rects > 0 and max_width > 0:
        h, w = mask.shape
        num_rects = random_integers(min_rects, max_rects)
        for i in range(num_rects):
            px1 = random_integers(0, w - min(max(min_width, 1), w))
            py1 = random_integers(0, h - min(max(min_width, 1), h))
            px2 = px1 + (min_width - 1) + random_integers(0, max(min(w - px1 - min_width, max_width - min_width), 0))
            py2 = py1 + (min_width - 1) + random_integers(0, max(min(h - py1 - min_width, max_width - min_width), 0))
            if px1 <= px2 and py1 <= py2:
                mask[py1:py2, px1:px2] = True
            # otherwise one of the sides has length 0, so no pixels are removed
    if len(new_img.shape) == 2:
        new_img[mask] = 0
    else:
        new_img[mask,:] = 0
    return (new_img, 1.0 * mask)

# Process command line inputs
Project: pytorch-semseg    Author: meetshah1995
def decode_segmap(self, temp, plot=False):
        label_colours = self.get_pascal_labels()
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            return rgb
Project: pyku    Author: dubvulture
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if isinstance(digits[i, cy, cx], int):  # cell still holds the initial 0
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Project: namegenderclassifier    Author: joaoalvarenga
def __parse_name(self, name):
    x = np.zeros(len(self.__trigrams_indexes) + len(self.__char_indexes))
    trigram = name[-3:]
    if trigram in self.__trigrams_indexes:
        x[self.__trigrams_indexes[trigram]] = 1
    for c in set(name):
        if c in self.__char_indexes:
            x[len(self.__trigrams_indexes) + self.__char_indexes[c]] = name.count(c) / float(len(name))
    return x
Project: AutoML5    Author: djajetic
def binarize_predictions(array, task='binary.classification'):
    ''' Turn predictions into decisions {0,1} by selecting the class with largest 
    score for multiclass problems and thresholding at 0.5 for other cases.'''
    # add a very small random value as tie breaker (a bit bad because this changes the score every time)
    # so to make sure we get the same result every time, we seed it    
    #eps = 1e-15
    #np.random.seed(sum(array.shape))
    #array = array + eps*np.random.rand(array.shape[0],array.shape[1])
    bin_array = np.zeros(array.shape)
    if (task != 'multiclass.classification') or (array.shape[1]==1): 
        bin_array[array>=0.5] = 1
    else:        
        sample_num=array.shape[0]
        for i in range(sample_num):
            j = np.argmax(array[i,:])
            bin_array[i,j] = 1        
    return bin_array
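
For multiclass input exactly one class is selected per row (a minimal check, assuming numpy is imported as np):

scores = np.array([[0.2, 0.5, 0.3],
                   [0.1, 0.1, 0.8]])
print(binarize_predictions(scores, task='multiclass.classification'))
# [[0. 1. 0.]
#  [0. 0. 1.]]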
Project: AutoML5    Author: djajetic
def baseline(output_dir, basename, valid_num, test_num, target_num):
    preds_valid = np.zeros([valid_num , target_num])
    preds_test = np.zeros([test_num , target_num])

    cycle = 0
    filename_valid = basename + '_valid_' + str(cycle).zfill(3) + '.predict'
    data_io.write(os.path.join(output_dir,filename_valid), preds_valid)
    filename_test = basename + '_test_' + str(cycle).zfill(3) + '.predict'
    data_io.write(os.path.join(output_dir,filename_test), preds_test)
Project: deep-summarization    Author: harpribot
def __generate_tensor(self, is_review, reverse=False):
        """

        :param is_review:
        :param reverse:
        :return:
        """
        seq_length = self.review_max_words if is_review else self.summary_max_words
        total_rev_summary_pairs = self.rev_sum_pair.shape[0]
        data_tensor = np.zeros([total_rev_summary_pairs,seq_length])

        sample = self.rev_sum_pair[0::, 0] if is_review else self.rev_sum_pair[0::, 1]

        for index, entry in enumerate(sample.tolist()):
            index_lst = np.array([self.map[word.lower()] for word in wordpunct_tokenize(entry)])
            # reverse if want to get backward form
            if reverse:
                index_lst = index_lst[::-1]
            # Pad the list
            if len(index_lst) <= seq_length:
                index_lst = np.lib.pad(index_lst, (0,seq_length - index_lst.size), 'constant', constant_values=(0, 0))
            else:
                index_lst = index_lst[0:seq_length]

            data_tensor[index] = index_lst

        return data_tensor
Project: joysix    Author: niberger
def dexp(v):
    r = v[0:3]
    dexpr = quat.dexp(r)
    MT = mt(v)
    return np.bmat([[dexpr,MT],[np.zeros((3,3)),dexpr]])
Project: joysix    Author: niberger
def dlog(v):
    r = v[0:3]
    dlogr = quat.dlog(r)
    MT = mt(v)
    return np.bmat([[dlogr, -dlogr*MT*dlogr],[np.zeros((3,3)),dlogr]])
Project: lang-reps    Author: chaitanyamalaviya
def get_id_set(lang_codes):
    feature_database = np.load("family_features.npz")
    lang_codes = [ get_language_code(l, feature_database) for l in lang_codes ]
    all_languages = list(feature_database["langs"])
    feature_names = [ "ID_" + l.upper() for l in all_languages ]
    values = np.zeros((len(lang_codes), len(feature_names)))
    for i, lang_code in enumerate(lang_codes):
        feature_index = get_language_index(lang_code, feature_database)
        values[i, feature_index] = 1.0
    return feature_names, values
Project: IntroToDeepLearning    Author: robb-brown
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
    # Receptive Fields Summary
    try:
        W = layer.W
    except AttributeError:
        W = layer
    wp = W.eval().transpose()
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape) 
    else:           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))

    fig = mpl.figure(figOffset); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
    for i in range(0,np.shape(fields)[0]):
        im = grid[i].imshow(fields[i],cmap=cmap); 

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #   tiled.append(np.hstack(fields2[i:i+perColumn]))
    # 
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
    mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
Project: IntroToDeepLearning    Author: robb-brown
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
    # Output summary
    try:
        W = layer.output
    except AttributeError:
        W = layer
    wp = W.eval(feed_dict=feed_dict)
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape))
        temp[0:wp.size] = wp.ravel()
        fields = np.reshape(temp, [1] + fieldShape)
    else:           # Convolutional layer already has shape
        wp = np.rollaxis(wp,3,0)
        features, channels, iy,ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0,perColumn*perRow,perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))

    tiled = np.vstack(tiled)
    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf(); 

    mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
Project: IntroToDeepLearning    Author: robb-brown
def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot
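
A minimal check (assuming the module is imported under its full name, as in the snippet):

labels = numpy.array([0, 2, 1])
print(dense_to_one_hot(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]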
Project: IntroToDeepLearning    Author: robb-brown
def plotFields(layer,fieldShape=None,channel=None,maxFields=25,figName='ReceptiveFields',cmap=None,padding=0.01):
    # Receptive Fields Summary
    W = layer.W
    wp = W.eval().transpose();
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
    else:           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    fieldsN = min(fields.shape[0],maxFields)
    perRow = int(math.floor(math.sqrt(fieldsN)))
    perColumn = int(math.ceil(fieldsN/float(perRow)))

    fig = mpl.figure(figName); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
    for i in range(0,fieldsN):
        im = grid[i].imshow(fields[i],cmap=cmap);

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #   tiled.append(np.hstack(fields2[i:i+perColumn]))
    #
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
    mpl.figure(figName+' Total'); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
Project: IntroToDeepLearning    Author: robb-brown
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
    # Output summary
    W = layer.output
    wp = W.eval(feed_dict=feed_dict);
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape))
        temp[0:wp.size] = wp.ravel()
        fields = np.reshape(temp, [1] + fieldShape)
    else:           # Convolutional layer already has shape
        wp = np.rollaxis(wp,3,0)
        features, channels, iy,ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0,perColumn*perRow,perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))

    tiled = np.vstack(tiled)
    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf();

    mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
Project: TDOSE    Author: kasperschmidt
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
    """
    Build a covariance matrix for a 2D multivariate Gaussian

    --- INPUT ---
    sigmax          Standard deviation of the x-component of the multivariate Gaussian
    sigmay          Standard deviation of the y-component of the multivariate Gaussian
    angle           Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
    verbose         Toggle verbosity
    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    covmatrix = tu.build_2D_cov_matrix(3,1,35)

    """
    if verbose: print(' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+
                      ') and then rotated '+str(angle)+' degrees')
    cov_orig      = np.zeros([2,2])
    cov_orig[0,0] = sigmay**2.0
    cov_orig[1,1] = sigmax**2.0

    angle_rad     = (180.0-angle) * np.pi/180.0 # the (180-angle) rotation matches the convention used by DS9
    c, s          = np.cos(angle_rad), np.sin(angle_rad)
    rotmatrix     = np.matrix([[c, -s], [s, c]])

    cov_rot       = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix))  # performing rot * cov * rot^T

    return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt
def gen_noisy_cube(cube,type='poisson',gauss_std=0.5,verbose=True):
    """
    Generate noisy cube based on input cube.

    --- INPUT ---
    cube        Data cube to be smoothed
    type        Type of noise to generate
                  poisson    Generates poissonian (integer) noise
                  gauss      Generates gaussian noise for a gaussian with standard deviation gauss_std=0.5
    gauss_std   Standard deviation of noise if type='gauss'
    verbose     Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    datacube        = np.ones(([3,3,3])); datacube[0,1,1]=5; datacube[1,1,1]=6; datacube[2,1,1]=8
    cube_with_noise = tu.gen_noisy_cube(datacube,type='gauss',gauss_std=0.5)

    """
    if verbose: print(' - Generating "'+type+'" noise on data cube')
    if type == 'poisson':
        cube_with_noise = np.random.poisson(lam=cube, size=None)
    elif type == 'gauss':
        cube_with_noise = cube + np.random.normal(loc=np.zeros(cube.shape),scale=gauss_std, size=None)
    else:
        sys.exit(' ---> type="'+type+'" is not valid in call to mock_cube_sources.generate_cube_noise() ')

    return cube_with_noise
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt
def perform_2Dconvolution(cube,kernels,use_fftconvolution=False,verbose=True):
    """
    Perform 2D convolution in data cube layer by layer

    --- INPUT ---
    cube                 Data cube to convolve
    kernels              List of (astropy) kernels to apply on each (z/wavelength) layer of the cube
    use_fftconvolution   To convolve in FFT space set this keyword to True
    verbose              Toggle verbosity

    --- EXAMPLE OF USE ---
    # see tdose_utilities.gen_psfed_cube()

    """
    csh = cube.shape
    cube_convolved = np.zeros(csh)

    for zz in range(csh[0]): # looping over wavelength layers of cube
        layer = cube[zz,:,:]
        if use_fftconvolution:
            layer_convolved = ac.convolve_fft(layer, kernels[zz], boundary='fill')
        else:
            layer_convolved = ac.convolve(layer, kernels[zz], boundary='fill')

        cube_convolved[zz,:,:] = layer_convolved

    return cube_convolved
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt
def gen_aperture(imgsize,ypos,xpos,radius,pixval=1,showaperture=False,verbose=True):
    """
    Generating an aperture image

    --- INPUT ---
    imgsize       The dimensions of the array to return. Expects [y-size,x-size].
                  The aperture will be positioned in the center of a (+/-x-size/2., +/-y-size/2) sized array
    ypos          Pixel position in the y direction
    xpos          Pixel position in the x direction
    radius        Radius of aperture in pixels
    showaperture  Display image of generated aperture
    verbose       Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    apertureimg  = tu.gen_aperture([20,40],10,5,10,showaperture=True)
    apertureimg  = tu.gen_aperture([2000,4000],900,1700,150,showaperture=True)

    """
    if verbose: print(' - Generating aperture in image (2D array)')
    y , x    = np.ogrid[-ypos:imgsize[0]-ypos, -xpos:imgsize[1]-xpos]
    mask     = x*x + y*y <= radius**2.
    aperture = np.zeros(imgsize)

    if verbose: print(' - Assigning pixel value '+str(pixval)+' to aperture')
    aperture[mask] = pixval

    if showaperture:
        if verbose: print(' - Displaying resulting image of aperture')
        plt.imshow(aperture,interpolation='none')
        plt.title('Generated aperture')
        plt.show()

    return aperture
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: DeepAnomaly    Author: adiyoss
def build_data_auto_encoder(data, step, win_size):
    count = data.shape[1] // step
    docX = np.zeros((count, 3, win_size))

    for i in range(0, data.shape[1] - win_size, step):
        c = i // step
        docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
        docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
        docX[c][2] = np.pad(
            (data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) * (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
            (0, 1), 'constant', constant_values=0)
    data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(docX.shape[0], docX.shape[1]*docX.shape[2])

    return data
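
A shape check on hypothetical random data: 100 samples with a step and window of 10 produce 10 windows of 3 derived features each, flattened to 30 columns (assuming numpy is imported as np):

data = np.random.rand(2, 100)
features = build_data_auto_encoder(data, step=10, win_size=10)
print(features.shape)  # (10, 30)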
Project: FaceSwap    Author: Aravind-Suresh
def get_face_mask(img, img_l):
    img = np.zeros(img.shape[:2], dtype = np.float64)

    for idx in OVERLAY_POINTS_IDX:
        cv2.fillConvexPoly(img, cv2.convexHull(img_l[idx]), color = 1)

    img = np.array([img, img, img]).transpose((1, 2, 0))
    img = (cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
    img = cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0)

    return img
Project: FaceSwap    Author: Aravind-Suresh
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
Project: FaceSwap    Author: Aravind-Suresh
def get_contour_mask(dshape, img_fl):
    mask = np.zeros(dshape)
    hull = cv2.convexHull(img_fl)
    cv2.drawContours(mask, [hull], 0, (1, 1, 1) , -1)
    return np.uint8(mask)

# Orients input_ mask onto tmpl_ face
Project: numerai    Author: gansanay
def get_earnings_per_round(self, username):
        r = requests.get('{0}/{1}'.format(self._users_url, username))
        if r.status_code!=200:
            return (None, r.status_code)

        rj = r.json()
        rewards = rj['rewards']
        earnings = np.zeros(len(rewards))
        for i in range(len(rewards)):
            earnings[i] = rewards[i]['amount']
        return (earnings, r.status_code)