Python numpy module: squeeze() code examples

We extracted the following 49 code examples from open-source Python projects to illustrate how numpy.squeeze() is used.
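
Before the project examples, a minimal demonstration of what numpy.squeeze() does: it removes axes of length 1, optionally restricted to a given axis.

import numpy as np

a = np.zeros((1, 3, 1, 2))
print(np.squeeze(a).shape)          # (3, 2): all length-1 axes removed
print(np.squeeze(a, axis=0).shape)  # (3, 1, 2): only the selected axis
# np.squeeze(a, axis=1) raises ValueError, because axis 1 has length 3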

Project: pybot    Author: spillai
def __init__(self, model_file, weights_file, mean_file): 
        if not os.path.exists(model_file) or \
           not os.path.exists(weights_file) or \
           not os.path.exists(mean_file): 
            raise ValueError('Invalid model: {}, \nweights file: {}, \nmean file: {}'
                             .format(model_file, weights_file, mean_file))

        # Init caffe with model
        self.net_ = caffe.Net(model_file, weights_file, caffe.TEST)
        self.mean_file_ = mean_file
        self.input_shape_ = self.net_.blobs['data'].data.shape    

        # Initialize mean file
        blob_meanfile = caffe.proto.caffe_pb2.BlobProto()
        data_meanfile = open(mean_file, 'rb').read()
        blob_meanfile.ParseFromString(data_meanfile)
        meanfile = np.squeeze(np.array(caffe.io.blobproto_to_array(blob_meanfile)))
        self.meanfile_ = meanfile.transpose((1,2,0))
        self.meanfile_image_ = None
Project: lung-cancer-detector    Author: YichenGong
def _get_mask(self, scan, slide, series):
        img, s, o, origShape = scan
        mask = np.zeros((origShape[1], origShape[2]))
        nodules = self._nodule_info[series]
        for nodule in nodules:
            iid, z, edges = nodule
            z = int((z - o[2])/s[2])
            if z == slide:
                if edges.shape[0] > 1:
                    cv.fillPoly(mask, [edges], 255)
                else:
                    #It's a small nodule. Make a circle of radius 3mm
                    edges = np.squeeze(edges)
                    center = tuple(edges)
                    radius = max(3.0/s[0], 3.0/s[1])
                    cv.circle(mask, center, int(radius+1), 255, -1)

        if img.shape[1] != origShape[1] or img.shape[2] != origShape[2]:
            mask = imu.resize_2d(mask, (img.shape[1], img.shape[2]))
        return mask
Project: CausalGAN    Author: mkocaoglu
def did_succeed( output_dict, cond_dict ):
    '''
    Used in rejection sampling:
    for each row, determine if cond is satisfied
    for every cond in cond_dict

    success is hardcoded as round(label) being exactly equal
    to the integer in cond_dict
    '''

    # definition of success:
    def is_win(key):
        cond = np.squeeze(cond_dict[key])
        val=np.squeeze(output_dict[key])
        condition= np.round(val)==cond
        return condition

    scoreboard=[is_win(key) for key in cond_dict]
    #print('scoreboard', scoreboard)
    all_victories_bool=np.logical_and.reduce(scoreboard)
    return all_victories_bool.flatten()
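
A toy invocation with hypothetical label arrays, just to show the shapes involved: each dict maps a label name to an array, and the result is one boolean per row.

import numpy as np
output_dict = {'smile': np.array([[0.9], [0.2], [1.1]])}
cond_dict = {'smile': np.array([1])}
print(did_succeed(output_dict, cond_dict))  # [ True False  True]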
Project: kaggle-review    Author: daxiongshu
def post_sub_one(inx):
    w,h = 1918,1280
    path,out,threshold = inx
    data = np.load(path).item()
    imgs,pred = data['name'], data['pred']
    #print(pred.shape)
    fo = open(out,'w')
    #masks = pred>threshold
    for name,mask in zip(imgs,np.squeeze(pred)):
        mask = imresize(mask,[h,w])
        mask = mask>threshold
        code = rle_encode(mask)
        code = [str(i) for i in code]
        code = " ".join(code)
        fo.write("%s,%s\n"%(name,code))
    fo.close()
    return 0
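
The helper rle_encode is not included in this excerpt. A common Kaggle-style run-length encoder consistent with how it is called here (binary mask in, flat start/length pairs out) might look like this sketch; the column-major flattening is an assumption based on the Carvana submission format:

def rle_encode(mask):
    # Hypothetical implementation: run-length encode a binary mask.
    pixels = mask.flatten(order='F')           # column-major flattening
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-based run boundaries
    runs[1::2] -= runs[::2]                    # convert end positions to lengths
    return runs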
Project: kaggle-review    Author: daxiongshu
def show_one_img_mask(data):
    w,h = 1918,1280
    a = randint(0,31)
    path = "../input/test"
    data = np.load(data).item()
    name,masks = data['name'][a],data['pred']
    img = Image.open("%s/%s"%(path,name))
    #img.show()
    plt.imshow(img)
    plt.show()
    mask = np.squeeze(masks[a])
    mask = imresize(mask,[h,w]).astype(np.float32)
    print(mask.shape,mask[0])
    img = Image.fromarray(mask*256)#.resize([w,h])
    plt.imshow(img)
    plt.show()
Project: SGAN    Author: YuhangSong
def log_img(x,name,iteration=0,nrow=8):

    def log_img_final(x,name,iteration=0,nrow=8):
        vutils.save_image(
            x,
            LOGDIR+name+'_'+str(iteration)+'.png',
            nrow=nrow,
        )
        vis.images( 
            x.cpu().numpy(),
            win=str(MULTI_RUN)+'-'+name,
            opts=dict(caption=str(MULTI_RUN)+'-'+name+'_'+str(iteration)),
            nrow=nrow,
        )

    if params['REPRESENTATION']==chris_domain.VECTOR:
        x = vector2image(x)
    x = x.squeeze(1)
    if params['DOMAIN']=='2Dgrid':
        if x.size()[1]==2:
            log_img_final(x[:,0:1,:,:],name+'_b',iteration,nrow)
            log_img_final(x[:,1:2,:,:],name+'_a',iteration,nrow)
            x = torch.cat([x,x[:,0:1,:,:]],1)
    log_img_final(x,name,iteration,nrow)
Project: SGAN    Author: YuhangSong
def plot_convergence(images,name):
    '''
        evaluate domain
    '''

    dis, accept_rate = get_transition_prob_distribution(images)
    if not (np.sum(dis)==0.0):
        kl = scipy.stats.entropy(
            dis,
            qk=params['GRID_ACTION_DISTRIBUTION'],
            base=None
        )
        logger.plot(
            name+'-KL',
            np.asarray([kl])
        )
    l1 = np.squeeze(np.sum(np.abs(dis - np.asarray(params['GRID_ACTION_DISTRIBUTION']))))
    logger.plot(
        name+'-L1',
        np.asarray([l1])
    )
    logger.plot(
        name+'-AR',
        np.asarray([accept_rate])
    )
Project: TAC-GAN    Author: dashayushman
def save_for_viz_val(data_dir, generated_images, image_files, image_caps,
                     image_ids, image_size, id):

    generated_images = np.squeeze(np.array(generated_images))
    for i in range(0, generated_images.shape[0]):
        image_dir = join(data_dir, str(image_ids[i]))
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)

        real_image_path = join(image_dir,
                               '{}.jpg'.format(image_ids[i]))
        if os.path.exists(image_dir):
            real_images_255 = image_processing.load_image_array(image_files[i],
                                        image_size, image_ids[i], mode='val')
            scipy.misc.imsave(real_image_path, real_images_255)

        caps_dir = join(image_dir, "caps.txt")
        if not os.path.exists(caps_dir):
            with open(caps_dir, "w") as text_file:
                text_file.write(image_caps[i]+"\n")

        fake_images_255 = generated_images[i]
        scipy.misc.imsave(join(image_dir, 'fake_image_{}.jpg'.format(id)),
                          fake_images_255)
Project: TAC-GAN    Author: dashayushman
def save_distributed_image_batch(data_dir, generated_images, sel_i, sel_2, z_i,
                                 t_i, sel_img, sel_cap, sel_img_2, sel_cap_2,
                                 batch_size):

    generated_images = np.squeeze(generated_images)
    folder_name = str(sel_i) + '_' + str(sel_2)

    image_dir = join(data_dir, 't_interpolation', folder_name, str(z_i))
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    meta_path = os.path.join(image_dir, "meta.txt")
    with open(meta_path, "w") as text_file:
        text_file.write(str(sel_img) + "\t" + str(sel_cap) +
                        str(sel_img_2) + "\t" + str(sel_cap_2))
    fake_image_255 = (generated_images[batch_size-1])
    scipy.misc.imsave(join(image_dir, '{}.jpg'.format(t_i)),
                      fake_image_255)
Project: speechless    Author: JuliusKunze
def _decode_lambda(self, args):
        """
        Decoding within tensorflow graph.
        If kenlm_directory is specified, a modified version of tensorflow
        (available at https://github.com/timediv/tensorflow-with-kenlm)
        is required; it extends ctc_decode to use a KenLM decoder.
        :return: 
            Most probable decoded sequence.  Important: blank labels are returned as `-1`. 
        """
        import tensorflow as tf

        prediction_batch, prediction_lengths = args

        log_prediction_batch = tf.log(tf.transpose(prediction_batch, perm=[1, 0, 2]) + 1e-8)
        prediction_length_batch = tf.to_int32(tf.squeeze(prediction_lengths, axis=[1]))

        (decoded, log_prob) = self.ctc_get_decoded_and_log_probability_batch(log_prediction_batch,
                                                                             prediction_length_batch)

        return single([tf.sparse_to_dense(st.indices, st.dense_shape, st.values, default_value=-1) for st in decoded])
Project: speechless    Author: JuliusKunze
def test_and_predict_batch(self, labeled_spectrogram_batch: List[LabeledSpectrogram]) -> ExpectationsVsPredictions:
        input_by_name, dummy_labels = self._inputs_for_loss_net(labeled_spectrogram_batch)

        predicted_graphemes, loss_batch = self.get_predicted_graphemes_and_loss_batch(
            [input_by_name[input.name.split(":")[0]] for input in self.loss_net.inputs] + [self.prediction_phase_flag])

        # blank labels are returned as -1 by tensorflow:
        predicted_graphemes[predicted_graphemes < 0] = self.grapheme_encoding.ctc_blank

        prediction_lengths = list(numpy.squeeze(input_by_name[Wav2Letter.InputNames.prediction_lengths], axis=1))
        losses = list(numpy.squeeze(loss_batch, axis=1))

        # merge was already done by tensorflow, so we disable it here:
        predictions = self.grapheme_encoding.decode_grapheme_batch(predicted_graphemes, prediction_lengths,
                                                                   merge_repeated=False)

        return ExpectationsVsPredictions(
            [ExpectationVsPrediction(predicted=predicted, expected=expected, loss=loss) for predicted, expected, loss in
             zip(predictions, (e.label for e in labeled_spectrogram_batch), losses)])
Project: how_to_convert_text_to_images    Author: llSourcell
def sample_embeddings(self, embeddings, filenames, class_id, sample_num):
        if len(embeddings.shape) == 2 or embeddings.shape[1] == 1:
            return np.squeeze(embeddings)
        else:
            batch_size, embedding_num, _ = embeddings.shape
            # Take every sample_num captions to compute the mean vector
            sampled_embeddings = []
            sampled_captions = []
            for i in range(batch_size):
                randix = np.random.choice(embedding_num,
                                          sample_num, replace=False)
                if sample_num == 1:
                    randix = int(randix)
                    captions = self.readCaptions(filenames[i],
                                                 class_id[i])
                    sampled_captions.append(captions[randix])
                    sampled_embeddings.append(embeddings[i, randix, :])
                else:
                    e_sample = embeddings[i, randix, :]
                    e_mean = np.mean(e_sample, axis=0)
                    sampled_embeddings.append(e_mean)
            sampled_embeddings_array = np.array(sampled_embeddings)
            return np.squeeze(sampled_embeddings_array), sampled_captions
Project: RaspberryPi-Robot    Author: timestocome
def is_cat(self):

        #now = datetime.datetime.now()

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)



        found = []
        for i in range(3):
            found.append((self.labels[i], results[i]))

        #print(datetime.datetime.now() - now)
        return found
Project: RaspberryPi-Robot    Author: timestocome
def is_cat(self):

        now = datetime.datetime.now()

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)



        found = []
        for i in range(3):
            found.append((self.labels[i], results[i]))


        print(datetime.datetime.now() - now)
        return found
Project: RaspberryPi-Robot    Author: timestocome
def run_graph(self):

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        top_k = results.argsort()[-3:][::-1]

        # print results
        #for i in top_k:
        #    print(self.labels[i], results[i])

        found = []
        for i in top_k:
            found.append((self.labels[i], results[i]))
        return found
Project: iterative_inference_segm    Author: adri-romsor
def my_label2rgboverlay(labels, colors, image, alpha=0.2):
    """
    Generates image with segmentation labels on top

    Parameters
    ----------
    labels:  labels of one image (0, 1)
    colors:  colormap
    image:   image (0, 1, c), where c=3 (rgb)
    alpha: transparency
    """
    image_float = gray2rgb(img_as_float(rgb2gray(image) if
                                        image.shape[2] == 3 else
                                        np.squeeze(image)))
    label_image = my_label2rgb(labels, colors)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
Project: mx-rfcn    Author: giorking
def do_checkpoint(prefix):
    """Callback to checkpoint the model to prefix every epoch.

    Parameters
    ----------
    prefix : str
        The file prefix to checkpoint to

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    def _callback(iter_no, sym, arg, aux):
        #if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
        #    print "save model with mean/std"
        #    num_classes = len(arg['bbox_pred_bias'].asnumpy()) / 4
        #    means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (1, num_classes))
        #    stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (1, num_classes))
        #    arg['bbox_pred_weight'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
        #    arg['bbox_pred_bias'] = arg['bbox_pred_bias'] * mx.nd.array(np.squeeze(stds)) + \
        #                                   mx.nd.array(np.squeeze(means))
        """The checkpoint function."""
        save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback
Project: SRGAN-tensorflow    Author: zoharli
def batch_ssim(dbatch):
    im1,im2=np.split(dbatch,2)
    imgsize=im1.shape[1]*im1.shape[2]
    avg1=im1.mean((1,2),keepdims=1)
    avg2=im2.mean((1,2),keepdims=1)
    std1=im1.std((1,2),ddof=1)
    std2=im2.std((1,2),ddof=1)
    cov=((im1-avg1)*(im2-avg2)).mean((1,2))*imgsize/(imgsize-1)
    avg1=np.squeeze(avg1)
    avg2=np.squeeze(avg2)
    k1=0.01
    k2=0.03
    c1=(k1*255)**2
    c2=(k2*255)**2
    c3=c2/2
    return np.mean((2*avg1*avg2+c1)*2*(cov+c3)/(avg1**2+avg2**2+c1)/(std1**2+std2**2+c2))
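
A quick sanity check: stacking two identical halves gives a score of 1 (pixel values assumed to lie in [0, 255], matching the k1*255 and k2*255 constants above).

im = np.random.rand(4, 32, 32) * 255
dbatch = np.concatenate([im, im])  # first half equals second half
print(batch_ssim(dbatch))          # 1.0 (up to rounding)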
Project: sound_field_analysis-py    Author: QULab
def dspbessel(n, kr):
    """Derivative of spherical Bessel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    J' : complex float
       Derivative of spherical Bessel
    """
    return _np.squeeze((n * spbessel(n - 1, kr) - (n + 1) * spbessel(n + 1, kr)) / (2 * n + 1))
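
The recurrence (n*j_{n-1}(kr) - (n+1)*j_{n+1}(kr)) / (2n+1) used above can be cross-checked against scipy.special.spherical_jn, which evaluates the derivative directly (a verification sketch for n = 2, assuming SciPy >= 0.18):

import numpy as np
from scipy.special import spherical_jn

kr = np.linspace(0.1, 10, 5)
recurrence = (2 * spherical_jn(1, kr) - 3 * spherical_jn(3, kr)) / 5.0
print(np.allclose(recurrence, spherical_jn(2, kr, derivative=True)))  # True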
Project: MatchZoo    Author: faneshion
def map(y_true, y_pred, rel_threshold=0):
    s = 0.
    y_true = _to_list(np.squeeze(y_true).tolist())
    y_pred = _to_list(np.squeeze(y_pred).tolist())
    c = list(zip(y_true, y_pred))
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    for j, (g, p) in enumerate(c):
        if g > rel_threshold:
            ipos += 1.
            s += ipos / ( j + 1.)
    if ipos == 0:
        s = 0.
    else:
        s /= ipos
    return s
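
A toy call with made-up scores (note the function shadows Python's built-in map and relies on the module's _to_list helper): two relevant documents ranked 1st and 3rd give (1/1 + 2/3) / 2 ≈ 0.833.

y_true = [1, 0, 1, 0]
y_pred = [0.9, 0.8, 0.7, 0.1]
print(map(y_true, y_pred))  # ~0.833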
Project: MatchZoo    Author: faneshion
def recall(k=10):
    def top_k(y_true, y_pred, rel_threshold=0.):
        if k <= 0:
            return 0.
        s = 0.
        y_true = _to_list(np.squeeze(y_true).tolist()) # y_true: the ground truth scores for documents under a query
        y_pred = _to_list(np.squeeze(y_pred).tolist()) # y_pred: the predicted scores for documents under a query
        pos_count = sum(i > rel_threshold for i in y_true) # total number of positive documents under this query
        c = list(zip(y_true, y_pred))
        random.shuffle(c)
        c = sorted(c, key=lambda x: x[1], reverse=True)
        ipos = 0
        recall = 0.
        for i, (g, p) in enumerate(c):
            if i >= k:
                break
            if g > rel_threshold:
                recall += 1
        recall = recall / pos_count if pos_count > 0 else 0.  # guard against queries with no positive documents
        return recall
    return top_k
Project: MatchZoo    Author: faneshion
def eval_map(y_true, y_pred, rel_threshold=0):
    s = 0.
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    c = list(zip(y_true, y_pred))  # list() so random.shuffle works under Python 3
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    for j, (g, p) in enumerate(c):
        if g > rel_threshold:
            ipos += 1.
            s += ipos / ( j + 1.)
    if ipos == 0:
        s = 0.
    else:
        s /= ipos
    return s
Project: MatchZoo    Author: faneshion
def eval_precision(y_true, y_pred, k = 10, rel_threshold=0.):
    if k <= 0:
        return 0.
    s = 0.
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    c = list(zip(y_true, y_pred))  # list() so random.shuffle works under Python 3
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    precision = 0.
    for i, (g,p) in enumerate(c):
        if i >= k:
            break
        if g > rel_threshold:
            precision += 1
    precision /=  k
    return precision
Project: MatchZoo    Author: faneshion
def eval_precision(y_true, y_pred, k = 10, rel_threshold=0.):
    if k <= 0:
        return 0.
    s = 0.
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    c = list(zip(y_true, y_pred))  # list() so random.shuffle works under Python 3
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    precision = 0.
    for i, (g,p) in enumerate(c):
        if i >= k:
            break
        if g > rel_threshold:
            precision += 1
    precision /=  k
    return precision
Project: DL4MT    Author: thompsonb
def score(self, x_or_y):
        if len(x_or_y.shape) > 2:  # x shape: (1, N, M). y shape: (N, M)  todo: work with factors
            x_or_y = numpy.squeeze(x_or_y, axis=0)
        """
        Nematus is generally called on 1)Tokenized, 2)Truecased, 3)BPE data.
        So we will train KenLM on Tokenized, Truecase data.
        Therefore all we need to do is convert to a string and deBPE.
        """
        sentences = [deBPE(seqs2words(seq, self.id_to_word)) for seq in x_or_y.T]
        scores = self.model.score(sentences)
        #try:
        #    print 'remote LM sentences/scores:'
        #    for sent, score in zip(sentences, scores):
        #        print '"'+sent+'":', score
        #except Exception, e:
        #    print 'failed to print LM sentences/scores', e
        return scores
Project: esys-pbi    Author: fsxfreak
def calculate_residual_3D_Points( ref_points, gaze_points, eye_to_world_matrix ):

    average_distance = 0.0
    distance_variance = 0.0
    transformed_gaze_points = []

    for p in gaze_points:
        point = np.zeros(4)
        point[:3] = p
        point[3] = 1.0
        point = eye_to_world_matrix.dot(point)
        point = np.squeeze(np.asarray(point))
        transformed_gaze_points.append( point[:3] )

    for(a,b) in zip( ref_points, transformed_gaze_points):
        average_distance += np.linalg.norm(a-b)

    average_distance /= len(ref_points)

    for(a,b) in zip( ref_points, transformed_gaze_points):
        distance_variance += (np.linalg.norm(a-b) - average_distance)**2

    distance_variance /= len(ref_points)

    return average_distance, distance_variance
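
A minimal call, assuming a 4x4 eye-to-world matrix; with an identity transform and identical point sets, both residual statistics are zero.

import numpy as np
pts = [np.array([0., 0., 1.]), np.array([1., 0., 1.])]
avg, var = calculate_residual_3D_Points(pts, pts, np.eye(4))
print(avg, var)  # 0.0 0.0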
Project: probabilistic-matrix-factorization    Author: aki-nishimura
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
        # Params:
        #   J - column indices

        nnz_i = len(J)
        residual_i = y_i - mu0 - c[J]
        prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
        v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
        post_Phi_i = prior_Phi + \
                     np.dot(v_T.T,
                            np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)),
                                                                                   lower=lower)
        ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
        r_i = ru_i[0]
        u_i = ru_i[1:]

        return r_i, u_i
Project: probabilistic-matrix-factorization    Author: aki-nishimura
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):

        prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
        nnz_j = len(I)
        residual_j = y_j - mu0 - r[I]
        u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
        post_Phi_j = prior_Phi + \
                     np.dot(u_T.T,
                            np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
        post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_j)
        post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)),
                                                                              lower=lower)
        cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
        c_j = cv_j[0]
        v_j = cv_j[1:]

        return c_j, v_j
Project: deep-lossy-fun    Author: PetarV-
def deprocess_and_save(x, img_path):
    # Remove the batch dimension
    x = np.squeeze(x)

    # Restore the mean values on each channel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68

    # BGR --> RGB
    x = x[:, :, ::-1]

    # Clip unprintable colours
    x = np.clip(x, 0, 255).astype('uint8')

    # Save the image
    imsave(img_path, x)
Project: cleverhans    Author: tensorflow
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack
Project: latplan    Author: guicho271828
def prepare_oae_PU3(known_transisitons):
    print("discriminate the correct transitions and the other transitions generated by OAE,",
          " filtered by the learned state discriminator",
          sep="\n")
    N = known_transisitons.shape[1] // 2
    y = generate_oae_action(known_transisitons)

    print("removing invalid successors (sd3)")
    ind = np.where(np.squeeze(combined(y[:,N:])) > 0.5)[0]

    y = y[ind]
    if len(known_transisitons) > 100:
        y = y[:len(known_transisitons)] # undersample

    print("valid:",len(known_transisitons),"mixed:",len(y),)
    print("creating binary classification labels")
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(known_transisitons, y))

################################################################
# training parameters
Project: latplan    Author: guicho271828
def prepare_oae_PU4(known_transisitons):
    print("Learn from pre + action label",
          "*** INCOMPATIBLE MODEL! ***",
          sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:,N:])) > 0.5)[0]

    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons[:,:N], np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y[:,:N], np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))
Project: latplan    Author: guicho271828
def prepare_oae_PU5(known_transisitons):
    print("Learn from pre + suc + action label",
          "*** INCOMPATIBLE MODEL! ***",
          sep="\n")
    N = known_transisitons.shape[1] // 2

    y = generate_oae_action(known_transisitons)

    ind = np.where(np.squeeze(combined(y[:,N:])) > 0.5)[0]

    y = y[ind]

    actions = oae.encode_action(known_transisitons, batch_size=1000).round()
    positive = np.concatenate((known_transisitons, np.squeeze(actions)), axis=1)
    actions = oae.encode_action(y, batch_size=1000).round()
    negative = np.concatenate((y, np.squeeze(actions)), axis=1)
    # random.shuffle(negative)
    # negative = negative[:len(positive)]
    # normalize
    return (default_networks['PUDiscriminator'], *prepare_binary_classification_data(positive, negative))
Project: stanity    Author: hammerlab
def sumlogs(x, axis=None, out=None):
    """Sum of vector where numbers are represented by their logarithms.

    Calculates np.log(np.sum(np.exp(x), axis=axis)) in such a fashion that it
    works even when elements have large magnitude.

    """
    maxx = x.max(axis=axis, keepdims=True)
    xnorm = x - maxx
    np.exp(xnorm, out=xnorm)
    out = np.sum(xnorm, axis=axis, out=out)
    if isinstance(out, np.ndarray):
        np.log(out, out=out)
    else:
        out = np.log(out)
    out += np.squeeze(maxx)
    return out
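
This is the numerically stable log-sum-exp trick; the result should agree with scipy.special.logsumexp (a quick check, assuming SciPy >= 1.0):

import numpy as np
from scipy.special import logsumexp

x = np.array([1000.0, 1000.0, 1000.0])
print(sumlogs(x))    # ~1001.0986, no overflow even though exp(1000) overflows
print(logsumexp(x))  # same value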
Project: tensorflow-prebuilt-classifier    Author: recursionbane
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.

  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    bottleneck_tensor: Layer before the final softmax.

  Returns:
    Numpy array of bottleneck values.
  """
  bottleneck_values = sess.run(
      bottleneck_tensor,
      {image_data_tensor: image_data})
  bottleneck_values = np.squeeze(bottleneck_values)
  return bottleneck_values
Project: fem    Author: mlp6
def extract_image_plane(snic, axes, ele_pos):
    """extract 2D imaging plane node IDs

    Extract a 2D matrix of the imaging plane node IDs based on the
    elevation position (mesh coordinates).

    :param snic: sorted node IDs and coordinates
    :param axes: spatial axes
    :param ele_pos: elevation position for plane of interest
    :returns: image_plane (node IDs)

    """
    import numpy as np

    ele0 = np.min(np.where(axes[0] >= ele_pos))
    image_plane = np.squeeze(snic[ele0, :, :]).astype(int)

    return image_plane
Project: radar    Author: amoose136
def test_basic(self):
        from numpy.random import rand

        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)
Project: django-corenlp    Author: arunchaganty
def k_nearest_approx(self, vec, k):
        """Get the k nearest neighbors of a vector (in terms of cosine similarity).

        :param (np.array) vec: query vector
        :param (int) k: number of top neighbors to return

        :return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order
        """
        if not hasattr(self, 'lshf'):
            self.lshf = self._init_lsh_forest()

        # TODO(kelvin): make this inner product score, to be consistent with k_nearest
        distances, neighbors = self.lshf.kneighbors(vec, n_neighbors=k, return_distance=True)
        scores = np.subtract(1, distances)
        nbr_score_pairs = self.score_map(np.squeeze(neighbors), np.squeeze(scores))

        return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True)
Project: django-corenlp    Author: arunchaganty
def k_nearest_approx(self, vec, k):
        """Get the k nearest neighbors of a vector (in terms of cosine similarity).

        :param (np.array) vec: query vector
        :param (int) k: number of top neighbors to return

        :return (list[tuple[str, float]]): a list of (word, cosine similarity) pairs, in descending order
        """
        if not hasattr(self, 'lshf'):
            self.lshf = self._init_lsh_forest()

        # TODO(kelvin): make this inner product score, to be consistent with k_nearest
        distances, neighbors = self.lshf.kneighbors(vec, n_neighbors=k, return_distance=True)
        scores = np.subtract(1, distances)
        nbr_score_pairs = self.score_map(np.squeeze(neighbors), np.squeeze(scores))

        return sorted(nbr_score_pairs.items(), key=lambda x: x[1], reverse=True)
Project: HTM_experiments    Author: ctrl-z-9000-times
def new_image(self, image, diag=False):
        if isinstance(image, str):
            self.image_file = image
            self.image = np.array(PIL.Image.open(image))
        else:
            self.image_file = None
            self.image = image
        # Get the image into the right format.
        if self.image.dtype != np.uint8:
            raise TypeError('Image %s dtype is not unsigned 8 bit integer, image.dtype is %s.'%(
                    '"%s"'%self.image_file if self.image_file is not None else 'argument',
                    self.image.dtype))
        self.image = np.squeeze(self.image)
        if len(self.image.shape) == 2:
            self.image = np.dstack([self.image] * 3)

        self.preprocess_edges()
        self.randomize_view()

        if diag:
            plt.figure('Image')
            plt.title('Image')
            plt.imshow(self.image, interpolation='nearest')
            plt.show()
Project: HyperGAN    Author: 255BITS
def plot(self, image, filename, save_sample):
        """ Plot an image."""
        image = np.minimum(image, 1)
        image = np.maximum(image, -1)
        image = np.squeeze(image)
        # Scale to 0..255.
        imin, imax = image.min(), image.max()
        image = (image - imin) * 255. / (imax - imin) + .5
        image = image.astype(np.uint8)
        if save_sample:
            try:
                Image.fromarray(image).save(filename)
            except Exception as e:
                print("Warning: could not sample to ", filename, ".  Please check permissions and make sure the path exists")
                print(e)
        GlobalViewer.update(image)
Project: iGAN    Author: junyanz
def grid_vis(X, nh, nw): #[buggy]
    if X.shape[0] == 1:
        return X[0]

    # nc = 3
    if X.ndim == 3:
        X = X[..., np.newaxis]
    if X.shape[-1] == 1:
        X = np.tile(X, [1,1,1,3])

    h, w = X[0].shape[:2]

    if X.dtype == np.uint8:
        img = np.ones((h * nh, w * nw, 3), np.uint8) * 255
    else:
        img = np.ones((h * nh, w * nw, 3), X.dtype)

    for n, x in enumerate(X):
        j = n // nw
        i = n % nw
        img[j * h:j * h + h, i * w:i * w + w, :] = x
    img = np.squeeze(img)
    return img
Project: SIF    Author: PrincetonML
def sim_getCorrelation(We,words,f, weight4ind, scoring_function, params):
    f = open(f,'r')
    lines = f.readlines()
    golds = []
    seq1 = []
    seq2 = []
    for i in lines:
        i = i.split("\t")
        p1 = i[0]; p2 = i[1]; score = float(i[2])
        X1, X2 = data_io.getSeqs(p1,p2,words)
        seq1.append(X1)
        seq2.append(X2)
        golds.append(score)
    x1,m1 = data_io.prepare_data(seq1)
    x2,m2 = data_io.prepare_data(seq2)
    m1 = data_io.seq2weight(x1, m1, weight4ind)
    m2 = data_io.seq2weight(x2, m2, weight4ind)
    scores = scoring_function(We,x1,x2,m1,m2, params)
    preds = np.squeeze(scores)
    return pearsonr(preds,golds)[0], spearmanr(preds,golds)[0]
Project: SIF    Author: PrincetonML
def getCorrelation(model,words,f, params=[]):
    f = open(f,'r')
    lines = f.readlines()
    preds = []
    golds = []
    seq1 = []
    seq2 = []
    for i in lines:
        i = i.split("\t")
        p1 = i[0]; p2 = i[1]; score = float(i[2])
        X1, X2 = data_io.getSeqs(p1,p2,words)
        seq1.append(X1)
        seq2.append(X2)
        golds.append(score)
    x1,m1 = data_io.prepare_data(seq1)
    x2,m2 = data_io.prepare_data(seq2)
    if params and params.weightfile:
        m1 = data_io.seq2weight(x1, m1, params.weight4ind)
        m2 = data_io.seq2weight(x2, m2, params.weight4ind)
    scores = model.scoring_function(x1,x2,m1,m2)
    preds = np.squeeze(scores)
    return pearsonr(preds,golds)[0], spearmanr(preds,golds)[0]
Project: magenta    Author: tensorflow
def save_np_image(image, output_file, save_format='jpeg'):
  """Saves an image to disk.

  Args:
    image: 3-D numpy array of shape [image_size, image_size, 3] and dtype
        float32, with values in [0, 1].
    output_file: str, output file.
    save_format: format for saving image (eg. jpeg).
  """
  image = np.uint8(image * 255.0)
  buf = io.BytesIO()
  scipy.misc.imsave(buf, np.squeeze(image, 0), format=save_format)
  buf.seek(0)
  f = tf.gfile.GFile(output_file, 'w')
  f.write(buf.getvalue())
  f.close()
Project: image_recognition    Author: tue-robotics
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.

  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    bottleneck_tensor: Layer before the final softmax.

  Returns:
    Numpy array of bottleneck values.
  """
  bottleneck_values = sess.run(
      bottleneck_tensor,
      {image_data_tensor: image_data})
  bottleneck_values = np.squeeze(bottleneck_values)
  return bottleneck_values
Project: zhusuan    Author: thu-ml
def standardize(data_train, data_test):
    """
    Standardize a dataset to have zero mean and unit standard deviation.

    :param data_train: 2-D Numpy array. Training data.
    :param data_test: 2-D Numpy array. Test data.

    :return: (train_set, test_set, mean, std), The standardized dataset and
        their mean and standard deviation before processing.
    """
    std = np.std(data_train, 0, keepdims=True)
    std[std == 0] = 1
    mean = np.mean(data_train, 0, keepdims=True)
    data_train_standardized = (data_train - mean) / std
    data_test_standardized = (data_test - mean) / std
    mean, std = np.squeeze(mean, 0), np.squeeze(std, 0)
    return data_train_standardized, data_test_standardized, mean, std
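
Example usage with synthetic data (rows are samples, columns are features); the standardized training set has zero mean and unit standard deviation per column.

import numpy as np

data_train = np.random.randn(100, 3) * 5 + 2
data_test = np.random.randn(20, 3) * 5 + 2
train_s, test_s, mean, std = standardize(data_train, data_test)
print(train_s.mean(0))  # ~[0. 0. 0.]
print(train_s.std(0))   # ~[1. 1. 1.]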
Project: zhusuan    Author: thu-ml
def effective_sample_size(samples, burn_in=100):
    """
    Compute the effective sample size of a chain of vector samples, using the
    algorithm in Stan. Users should flatten their samples as vectors if not so.

    :param samples: A 2-D numpy array of shape ``(M, D)``, where ``M`` is the
        number of samples, and ``D`` is the number of dimensions of each
        sample.
    :param burn_in: The number of discarded samples.

    :return: A 1-D numpy array. The effective sample size.
    """
    current_ess = np.inf
    esses = []
    for d in range(samples.shape[1]):
        ess = effective_sample_size_1d(np.squeeze(samples[burn_in:, d]))
        assert ess >= 0
        if ess > 0:
            current_ess = min(current_ess, ess)

        esses.append(ess)
    return current_ess
Project: dcss_single_cell    Author: srmcc
def tru_plot9(X,labels,t,plot_suffix,clust_names,clust_color, plot_loc):
    """
    From clustering_on_transcript_compatibility_counts, see github for MIT license
    """
    unique_labels = np.unique(labels)
    plt.figure(figsize=(15,10))
    for i in unique_labels:
        ind = np.squeeze(labels == i)
        plt.scatter(X[ind,0],X[ind,1],c=clust_color[i],s=36,edgecolors='gray',
                    lw = 0.5, label=clust_names[i])        
    plt.legend(loc='upper right', bbox_to_anchor=(1.19, 1.01))
    plt.title(t)
    plt.xlim([-20,20])
    plt.ylim([-20,20])
    plt.axis('off')
    plt.savefig(plot_loc+ 't-SNE_plot_tru_plot9_'+ plot_suffix +'.pdf', bbox_inches='tight')

    # Plot function with Zeisel's colors corresponding to labels