Python scipy.misc module: imshow() example source code

We extracted the following 10 code examples from Python open source projects to illustrate how to use scipy.misc.imshow().
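
Before the project examples, here is a minimal, hedged sketch of calling scipy.misc.imshow() directly (the array name img is illustrative). The function was deprecated in SciPy 1.0 and removed in later releases; it saves the array to a temporary PNG and opens it with the external viewer named by the SCIPY_PIL_IMAGE_VIEWER environment variable, so it requires an older SciPy with Pillow installed.

import numpy as np
from scipy import misc  # requires an older SciPy (pre-removal) with Pillow installed

# Any 2-D (grayscale) or HxWx3 (RGB) uint8 array can be shown.
img = (np.random.rand(64, 64) * 255).astype(np.uint8)
misc.imshow(img)  # writes a temporary PNG and blocks until the external viewer exits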

Project: Deep-Image-Matting    Author: Joker316701882
def main(args):

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = args.gpu_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options = gpu_options)) as sess:
        saver = tf.train.import_meta_graph('./meta_graph/my-model.meta')
        saver.restore(sess,tf.train.latest_checkpoint('./model'))
        image_batch = tf.get_collection('image_batch')[0]
        GT_trimap = tf.get_collection('GT_trimap')[0]
        pred_mattes = tf.get_collection('pred_mattes')[0]

        rgb = misc.imread(args.rgb)
        alpha = misc.imread(args.alpha,'L')
        trimap = generate_trimap(np.expand_dims(np.copy(alpha),2),np.expand_dims(alpha,2))[:,:,0]
        origin_shape = alpha.shape
        rgb = np.expand_dims(misc.imresize(rgb.astype(np.uint8),[320,320,3]).astype(np.float32)-g_mean,0)
        trimap = np.expand_dims(np.expand_dims(misc.imresize(trimap.astype(np.uint8),[320,320],interp = 'nearest').astype(np.float32),2),0)

        feed_dict = {image_batch:rgb,GT_trimap:trimap}
        pred_alpha = sess.run(pred_mattes,feed_dict = feed_dict)
        final_alpha = misc.imresize(np.squeeze(pred_alpha),origin_shape)
        # misc.imshow(final_alpha)
        misc.imsave('./alpha.png',final_alpha)
Project: gan-error-avoidance    Author: aleju
def get_batch(self, batch_size):
        print("Loading %d real images of max possible %d..." % (batch_size, self.train_index.size(0)))
        batch = torch.Tensor(batch_size, 3, self.opt.image_size, self.opt.image_size)
        for j in range(batch_size):
            img = self.get_data(self.train_index[self.counter])
            #print(img.size())
            batch[j].copy_(img)
            self.counter = (self.counter + 1) % self.train_index.size(0)
            #print("counter", self.counter)
        #print(np.average(batch.cpu().numpy()), np.min(batch.cpu().numpy()), np.max(batch.cpu().numpy()))
        batch_np = (batch.cpu().numpy() * 255).astype(np.uint8).transpose((0, 2, 3, 1))
        #return Variable(batch.cuda(), volatile=volatile)
        #from scipy import misc
        #misc.imshow(batch_np[0])
        return batch_np

#
# Background processes below
# TensorFlow and PyTorch code is separated into different processes, because
# otherwise the two frameworks interfere with each other.
# The whole measuring of inception scores for checkpoints runs in background
# processes, because otherwise Python does not free the memory properly and
# sooner or later runs into out-of-memory errors.
#
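
The isolation pattern described in the comment above can be sketched roughly as follows (the function names are illustrative, not the project's actual API): the heavy framework imports and the scoring call happen inside a child process, and only a picklable result travels back through a queue, so everything the child allocated is released when it exits.

import multiprocessing as mp

def _score_in_child(payload, queue):
    # Heavy imports (e.g. TensorFlow / inception scoring) happen only in the
    # child, so their allocations die with the child process.
    result = len(payload)  # placeholder for the real scoring call
    queue.put(result)

def score_isolated(payload):
    queue = mp.Queue()
    proc = mp.Process(target=_score_in_child, args=(payload, queue))
    proc.start()
    result = queue.get()   # read before join() so the child is not blocked on a full queue
    proc.join()            # child exit frees the GPU/CPU memory it held
    return result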
Project: crnn-music-genre-classification    Author: meetshah1995
def log_scale_melspectrogram(path, plot=False):
    signal, sr = lb.load(path, sr=Fs)
    n_sample = signal.shape[0]
    n_sample_fit = int(DURA*Fs)

    if n_sample < n_sample_fit:
        signal = np.hstack((signal, np.zeros((int(DURA*Fs) - n_sample,))))
    elif n_sample > n_sample_fit:
        signal = signal[(n_sample-n_sample_fit)//2:(n_sample+n_sample_fit)//2]  # // keeps the slice indices integers under Python 3

    melspect = lb.logamplitude(lb.feature.melspectrogram(y=signal, sr=Fs, hop_length=N_OVERLAP, n_fft=N_FFT, n_mels=N_MELS)**2, ref_power=1.0)

    if plot:
        melspect = melspect[np.newaxis, :]
        misc.imshow(melspect.reshape((melspect.shape[1],melspect.shape[2])))
        print(melspect.shape)

    return melspect
Project: self-driving-truck    Author: aleju
def is_route_advisor_visible(self, scr, threshold=2):
        ra = self.get_route_advisor_image(scr)
        #misc.imshow(ra)
        #print("ra_shape", ra.shape)
        #assert ra.shape == (9, 3)
        #ra1d = np.average(ra, axis=2)
        ra_rows = np.average(ra, axis=1)
        #print("ra_rows.shape", ra_rows.shape)
        #print("ra_rows", ra_rows)
        expected = np.array([[ 25.33766234,  22.92207792,  21.94805195],
                    [ 31.79220779,  29.50649351,  28.58441558],
                    [ 70.32467532,  68.96103896,  68.32467532],
                    [ 63.51948052,  61.97402597,  61.2987013 ],
                    [ 66.20779221,  64.72727273,  64.14285714],
                    [ 64.12987013,  62.51948052,  62.01298701],
                    [ 60.61038961,  58.94805195,  58.20779221],
                    [ 65.31168831,  63.74025974,  63.12987013],
                    [ 18.18181818,  15.66233766,  14.51948052]], dtype=np.float32)

        #print("expected", ra_rows)
        #print("diff", ra_rows - expected)

        # evade brightness differences
        observed_normalized = ra_rows - np.average(ra_rows)
        expected_normalized = expected - np.average(expected)

        #print("observed_normalized", observed_normalized)
        #print("expected_normalized", expected_normalized)

        dist = np.abs(observed_normalized - expected_normalized)
        dist_avg = np.average(dist)
        #print("dist", dist)
        #print("dist_avg", dist_avg)
        return dist_avg < threshold

    # quite close scores even for some non-paused images
Project: self-driving-truck    Author: aleju
def is_offence_shown(self, scr, threshold=0.97):
        time_start = time.time()
        y1 = 584
        y2 = 591 + 1
        x1 = 1119
        x2 = 1180 + 1
        offence_area = scr[y1:y2, x1:x2, :]
        x, y, score = util.template_match(needle=self.offence_ff_image, haystack=offence_area)
        time_req = time.time() - time_start
        #print("in %.4fs" % (time_req,))
        #print("is_offence_shown", x, y, score)
        #misc.imshow(offence_area)
        return score >= threshold
Project: gan-error-avoidance    Author: aleju
def _score_images(self, checkpoint_result, opt, queue):
        from common.inception_score import get_inception_score

        result = pickle.loads(checkpoint_result)

        images = list(result["images"])

        augseq = AUGMENTATIONS[opt.augment]
        if augseq is not None:
            images_aug = augseq.augment_images(images)
        else:
            images_aug = images

        if images_aug[0].shape != (299, 299, 3):
            images_aug_rs = [misc.imresize(image, (299, 299)) for image in images_aug]
        else:
            images_aug_rs = images_aug
        #misc.imshow(np.hstack(list(images_aug[0:32])))
        #misc.imshow(np.hstack(list(images_aug_rs[0:5])))
        nb_splits = 1
        print("Calculating inception score on %d images at shape %s and %d splits..." % (len(images_aug_rs), str(images_aug_rs[0].shape), nb_splits))
        mean, std = get_inception_score(images_aug_rs, splits=nb_splits, bs=opt.inception_batch_size)

        result_str = pickle.dumps(
            ([mean], [std]),
            protocol=-1
        )
        queue.put(result_str)
Project: formation_python2017    Author: gouarin
def show(self):
        """Display the image on screen."""
        misc.imshow(255*self.__pixels__)
Project: formation_python2017    Author: gouarin
def show(self):
        """Display the image on screen."""
        misc.imshow(255*self.__pixels__)
Project: self-driving-truck    Author: aleju
def estimate_angle(self, image):
        #from scipy import misc
        subimg = cnn_extract_steering_wheel_image(image)
        #misc.imshow(subimg)
        subimg = cnn_downscale_image(subimg)
        #misc.imshow(subimg)
        angle_raw_bins = self.model.forward_image(subimg, volatile=True, requires_grad=False, gpu=Config.GPU, softmax=True)
        angle_raw_bins = angle_raw_bins.data[0].cpu().numpy()
        angle_raw_bin = np.argmax(angle_raw_bins)
        #print(angle_raw_bins.data.cpu().numpy())

        """
        angle_raw_center = angle_raw_bin * CNN_ANGLE_BIN_SIZE + CNN_ANGLE_BIN_SIZE * 0.5 - 180
        angle_raw_left = angle_raw_center - CNN_ANGLE_BIN_SIZE
        angle_raw_right = angle_raw_center + CNN_ANGLE_BIN_SIZE
        angle_raw_center_p = angle_raw_bins[angle_raw_bin]
        angle_raw_left_p = angle_raw_bins[angle_raw_bin-1] if angle_raw_bin-1 > 0 else 0
        angle_raw_right_p = angle_raw_bins[angle_raw_bin+1] if angle_raw_bin+1 < angle_raw_bins.shape[0] else 0

        angle_raw = angle_raw_left_p * angle_raw_left + angle_raw_center_p * angle_raw_center + angle_raw_right_p * angle_raw_right
        """
        angle_raw = angle_raw_bin * CNN_ANGLE_BIN_SIZE + CNN_ANGLE_BIN_SIZE * 0.5 - 180

        #print(angle_raw)
        possible_angles = [angle_raw]
        if angle_raw < 0:
            possible_angles.append(180+(180-abs(angle_raw)))
            possible_angles.append(-360-abs(angle_raw))
        if angle_raw > 0:
            possible_angles.append(-180-(180-abs(angle_raw)))
            possible_angles.append(360+abs(angle_raw))
        possible_angles_dist = [(a, abs(self.last_angle - a)) for a in possible_angles]
        possible_angles_dist_sort = sorted(possible_angles_dist, key=lambda t: t[1])
        angle = possible_angles_dist_sort[0][0]

        if angle > Config.STEERING_WHEEL_MAX:
            angle = angle - 360
        elif angle < Config.STEERING_WHEEL_MIN:
            angle = angle + 360

        if abs(angle - self.last_angle) >= self.overflow_degrees:
            if self.overflow_counter >= self.overflow_max_count:
                self.last_angle = angle
                self.last_angle_raw = angle_raw
                self.overflow_counter = 0
            else:
                angle = self.last_angle
                angle_raw = self.last_angle_raw
            self.overflow_counter += 1
        else:
            self.last_angle = angle
            self.last_angle_raw = angle_raw
            self.overflow_counter = 0

        return angle, angle_raw
Project: gan-error-avoidance    Author: aleju
def _generate_images(self, nb_batches, g_fp, r_idx, opt, show_info, queue):
        import torch
        import torch.nn as nn
        import torch.optim as optim
        import torchvision
        import torchvision.datasets as datasets
        import torchvision.transforms as transforms
        from torch.autograd import Variable

        #np.random.seed(42)
        #random.seed(42)
        #torch.manual_seed(42)

        gen = GeneratorLearnedInputSpace(opt.width, opt.height, opt.nfeature, opt.nlayer, opt.code_size, opt.norm, n_lis_layers=opt.r_iterations, upscaling=opt.g_upscaling)
        if show_info:
            print("G:", gen)
        gen.cuda()
        prefix = "last"
        gen.load_state_dict(torch.load(g_fp))
        gen.train()

        print("Generating images for checkpoint G'%s'..." % (g_fp,))
        #imgs_by_riter = [[] for _ in range(1+opt.r_iterations)]
        images_all = []
        for i in range(nb_batches):
            code = Variable(torch.randn(opt.batch_size, opt.code_size).cuda(), volatile=True)

            #for r_idx in range(1+opt.r_iterations):
            images, _ = gen(code, n_execute_lis_layers=r_idx)
            images_np = (images.data.cpu().numpy() * 255).astype(np.uint8).transpose((0, 2, 3, 1))

            #from scipy import misc
            #print(np.average(images[0]), np.min(images[0]), np.max(images[0]))
            #print(np.average(images_fixed[0]), np.min(images_fixed[0]), np.max(images_fixed[0]))
            #misc.imshow(list(images_np)[0])
            #misc.imshow(list(images_fixed)[0])

            #imgs_by_riter[r_idx].extend(list(images_np))
            images_all.extend(images_np)

        result_str = pickle.dumps({
            "g_fp": g_fp,
            "images": images_all
        }, protocol=-1)
        queue.put(result_str)