Python cv2 module: applyColorMap() code examples

We extracted the following 24 code examples from open-source Python projects to show how to use cv2.applyColorMap().
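To get oriented before the project examples, here is a minimal, self-contained sketch of the basic call (the gradient input and output file name are illustrative, not taken from any project below):

import cv2
import numpy as np

# Build a horizontal gradient as a stand-in for any single-channel uint8 image.
gray = np.tile(np.arange(256, dtype=np.uint8), (64, 1))

# applyColorMap expects an 8-bit input and returns a 3-channel BGR image.
colored = cv2.applyColorMap(gray, cv2.COLORMAP_JET)
cv2.imwrite("gradient_jet.png", colored)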

Project: svm-street-detector    Author: morris-frank    | Project Source | File Source
def addHeatMaps(idir, hdir):
    print('addHeatMaps: ' + hdir)
    for i in os.listdir(hdir):
        if i.endswith(".png") and not i.endswith("_overlay.png"):
            filename = i[:-4]
            print(filename)
            heatmap = cv2.imread(hdir + i)
            image = cv2.imread(idir + i)
            if heatmap.shape != image.shape:
                print('Shape not equal')
                continue

            heatmap_jet = cv2.applyColorMap(heatmap, cv2.COLORMAP_WINTER)
            heatmap = heatmap / 255.0  # float blend weights in [0, 1]

            for c in range(3):
                #image[:,:,c] = image[:,:,c] + (0.1 + 0.9* heatmap[:,:,c]) * heatmap_jet[:,:,c]
                image[:, :, c] = heatmap_jet[:, :, c] + np.multiply(image[:, :, c], (1.0 - heatmap[:, :, c]))
            cv2.imwrite(hdir + filename + '_overlay.png', image)
Project: pytorch-explain-black-box    Author: jacobgil    | Project Source | File Source
def save(mask, img, blurred):
    mask = mask.cpu().data.numpy()[0]
    mask = np.transpose(mask, (1, 2, 0))

    mask = (mask - np.min(mask)) / (np.max(mask) - np.min(mask))  # min-max normalize to [0, 1]
    mask = 1 - mask
    heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET)

    heatmap = np.float32(heatmap) / 255
    cam = 1.0*heatmap + np.float32(img)/255
    cam = cam / np.max(cam)

    img = np.float32(img) / 255
    perturbated = np.multiply(1 - mask, img) + np.multiply(mask, blurred)   

    cv2.imwrite("perturbated.png", np.uint8(255*perturbated))
    cv2.imwrite("heatmap.png", np.uint8(255*heatmap))
    cv2.imwrite("mask.png", np.uint8(255*mask))
    cv2.imwrite("cam.png", np.uint8(255*cam))
Project: Interactive-object-tracking    Author: abhishekarya286    | Project Source | File Source
def likelihood_map(prob_map, image):
    '''This function generates the likelihood map based on either the obj-surr or obj-dist model.
       input: probability map
       output: likelihood map, an image (each pixel value = corresponding probability)'''

    global h_img, w_img, bin
    sf = 256.0 / bin
    image_10 = (image / sf).astype('uint8')
    # create a likelihood image according to the obj-surr or obj-distractor model
    a = image_10[:, :, 0].ravel()
    b = image_10[:, :, 1].ravel()
    c_ = image_10[:, :, 2].ravel()
    prob_image = prob_map[a, b, c_].reshape((h_img, w_img))
    prob_image1 = (prob_image * 255).astype('uint8')
    likemap = cv2.applyColorMap(prob_image1, cv2.COLORMAP_JET)
    return likemap, prob_image1
Project: keras-steering-angle-visualizations    Author: jacobgil    | Project Source | File Source
def visualize_hypercolumns(model, original_img):

    img = np.float32(cv2.resize(original_img, (200, 66))) / 255.0

    layers_extract = [9]

    hc = extract_hypercolumns(model, layers_extract, img)
    avg = np.prod(hc, axis=0)  # element-wise product across the extracted hypercolumns
    avg = np.abs(avg)
    avg = avg / np.max(np.max(avg))

    heatmap = cv2.applyColorMap(np.uint8(255 * avg), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / np.max(np.max(heatmap))
    heatmap = cv2.resize(heatmap, original_img.shape[0:2][::-1])

    both = 255 * heatmap * 0.7 + original_img
    both = both / np.max(both)
    return both
Project: esys-pbi    Author: fsxfreak    | Project Source | File Source
def recalculate(self):

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc distribution across all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf  = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = Named_Texture()
            s.metrics_texture.update_from_ndarray(heatmap)
Project: pytorch-smoothgrad    Author: pkdn    | Project Source | File Source
def save_cam_image(img, mask, filename):
    heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    cam = heatmap + np.float32(img)
    cam = cam / np.max(cam)
    cv2.imwrite(filename, np.uint8(255 * cam))
Project: traffic_video_analysis    Author: polltooh    | Project Source | File Source
def opencv_plot(des_name):
    densmap = np.fromfile(des_name, np.float32)
    densmap = np.reshape(densmap, (227, 227))
    #densmap = norm_image(densmap) * 100
    densmap *= 100.0
    densmap[densmap > 1] = 1
    densmap = norm_image(densmap) * 255
    densmap = densmap.astype(np.uint8)
    im_color = cv2.applyColorMap(densmap, cv2.COLORMAP_JET)
    cv2.imshow("im", im_color)
    cv2.waitKey(0)
Project: segmentation-visualization-training    Author: tkwoo    | Project Source | File Source
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
            loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print "[*] model load : %s"%weight_list[-1]
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000 
    print "[*] model loading Time: %.3f ms"%t_total

    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1,256,256,1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print "Predict Time: %.3f ms"%t_total

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255*flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256,256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print "SAVE:[%s]"%output_path
Project: segmentation-visualization-training    Author: tkwoo    | Project Source | File Source
def train_visualization_seg(self, model, epoch):
        image_name_list = sorted(glob(os.path.join(self.flag.data_path,'train/IMAGE/*/*.png')))
        print(image_name_list)

        image_name = image_name_list[-1]
        image_size = self.flag.image_size

        imgInput = cv2.imread(image_name, self.flag.color_mode)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_size,image_size,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print "[*] Predict Time: %.3f ms"%t_total

        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.4, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1)
Project: imgProcessor    Author: radjkarl    | Project Source | File Source
def applyColorMap(gray, cmap='flame'):
    '''
    like cv2.applyColorMap(im_gray, cv2.COLORMAP_*) but with different color maps
    '''
    # TODO: implement more cmaps
    if cmap != 'flame':
        raise NotImplementedError(cmap)
    # TODO: make better
    mx = 256  # if gray.dtype==np.uint8 else 65535
    lut = np.empty(shape=(256, 3))
    cmap = (
        # taken from pyqtgraph GradientEditorItem
        (0, (0, 0, 0)),
        (0.2, (7, 0, 220)),
        (0.5, (236, 0, 134)),
        (0.8, (246, 246, 0)),
        (1.0, (255, 255, 255))
    )
    # build lookup table:
    lastval, lastcol = cmap[0]
    for step, col in cmap[1:]:
        val = int(step * mx)
        for i in range(3):
            lut[lastval:val, i] = np.linspace(
                lastcol[i], col[i], val - lastval)

        lastcol = col
        lastval = val

    s0, s1 = gray.shape
    out = np.empty(shape=(s0, s1, 3), dtype=np.uint8)

    for i in range(3):
        out[..., i] = cv2.LUT(gray, lut[:, i])
    return out
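A quick usage sketch for the custom applyColorMap() above (the gradient input is illustrative; only the 'flame' map is implemented, so that is what we request, and we assume the function and its numpy/cv2 imports are in scope):

import numpy as np

gray = np.tile(np.arange(256, dtype=np.uint8), (64, 1))  # horizontal uint8 ramp
flame = applyColorMap(gray, cmap='flame')                # (64, 256, 3) uint8 image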
Project: Aesthetic_attributes_maps    Author: gautamMalu    | Project Source | File Source
def grad_cam(input_model, image, weights, feature_maps=None):
    # activation size of the final convolution layer is 10x10
    cam = np.ones((10, 10), dtype=np.float32)
    # Add weighted activation maps
    grads_val = weights
    for i in range(grads_val.shape[0]):
        # Added relu
        temp = (weights[i, :] * feature_maps[:, :, i])
        np.maximum(temp, 0, temp)
        cam += temp

    # resize and normalization
    del feature_maps
    cam = cv2.resize(cam, (299, 299))
    # Relu
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)
    # print image.shape

    cam = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)   
    cam = 0.5*np.float32(cam) + 0.5*np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam)
Project: grad-cam-pytorch    Author: kazuto1011    | Project Source | File Source
def save(self, filename, gcam, raw_image):
        gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
        gcam = gcam.astype(np.float32) + raw_image.astype(np.float32)
        gcam = gcam / gcam.max() * 255.0
        cv2.imwrite(filename, np.uint8(gcam))
Project: Lattice-Boltzmann-fluid-flow-in-Tensorflow    Author: loliverhennigh    | Project Source | File Source
def cube_save(domain, sess):
  frame = sess.run(domain.Vel[0])
  frame = np.sqrt(np.square(frame[0, :, shape[1]//2, :, 0]) + np.square(frame[0, :, shape[1]//2, :, 1]) + np.square(frame[0, :, shape[1]//2, :, 2]))
  frame = np.uint8(255 * frame / np.max(frame))
  frame = cv2.applyColorMap(frame, 2)  # 2 == cv2.COLORMAP_JET
  frame = cv2.resize(frame, (shape[2]*3, shape[0]*3))
  video.write(frame)
Project: Lattice-Boltzmann-fluid-flow-in-Tensorflow    Author: loliverhennigh    | Project Source | File Source
def car_save(domain, sess):
  frame = sess.run(domain.Vel[0])
  frame = np.sqrt(np.square(frame[0,:,:,0]) + np.square(frame[0,:,:,1]) + np.square(frame[0,:,:,2]))
  frame = np.uint8(255 * frame/np.max(frame))
  frame = cv2.applyColorMap(frame, 2)
  video.write(frame)
Project: Lattice-Boltzmann-fluid-flow-in-Tensorflow    Author: loliverhennigh    | Project Source | File Source
def lid_save(domain, sess):
  frame = sess.run(domain.Vel[0])
  frame = np.sqrt(np.square(frame[0,:,:,0]) + np.square(frame[0,:,:,1]) + np.square(frame[0,:,:,2]))
  frame = np.uint8(255 * frame/np.max(frame))
  frame = cv2.applyColorMap(frame, 2)
  video.write(frame)
Project: keras-steering-angle-visualizations    Author: jacobgil    | Project Source | File Source
def visualize_grad_cam(input_model, original_img, layer_name = "conv3_1"): 

    img = np.float32(cv2.resize(original_img, (200, 66))) / 255.0

    angle = input_model.predict(np.array([img]))
    print("The predicted angle is", 180.0 * angle[0][0] / scipy.pi, "degrees")

    model = Sequential()
    model.add(input_model)

    target_layer = lambda x: grad_cam_loss(x, angle)
    model.add(Lambda(target_layer,
                     output_shape = grad_cam_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = [l for l in model.layers[0].layers if l.name == layer_name][0].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input], [conv_output, grads])

    output, grads_val = gradient_function([[img]])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis = (0, 1))
    cam = np.ones(output.shape[0 : 2], dtype = np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    #ReLU:
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    cam = cv2.resize(cam, tuple(original_img.shape[0:2][::-1]))

    cam = cv2.applyColorMap(np.uint8(255*cam), cv2.COLORMAP_JET)

    cam = 1.0 * np.float32(cam) + np.float32(original_img)
    cam = cam / np.max(cam)
    return cam
Project: caffeNetViewer    Author: birolkuyumcu    | Project Source | File Source
def showImg(self,label,img):
        if len(img.shape) == 2:
            img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
            img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)
        height, width, byteValue = img.shape
        byteValue = byteValue * width
        timg = img.copy()
        image = QtGui.QImage(timg.data, width, height,byteValue, QtGui.QImage.Format_RGB888)
        label.setPixmap(QtGui.QPixmap(image).scaled(label.size(),aspectMode=QtCore.Qt.KeepAspectRatio))     


        """ visualize function from
        https://github.com/BVLC/caffe/blob/master/examples/00-classification.ipynb
        """
Project: caffeNetViewer    Author: birolkuyumcu    | Project Source | File Source
def vis_square(self, data):
        """Take an array of shape (n, height, width) or (n, height, width, 3)
           and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""

        print "Data Shape : ", data.shape

        # normalize data for display
        data = (data - data.min()) / (data.max() - data.min())

        # force the number of filters to be square
        n = int(np.ceil(np.sqrt(data.shape[0])))
        padding = (((0, n ** 2 - data.shape[0]),
                    (0, 1), (0, 1))  # add some space between filters
                   + ((0, 0),) * (data.ndim - 3))  # don't pad the last dimension (if there is one)
        data = np.pad(data, padding, mode='constant', constant_values=0)  # pad with zeros (black)

        # tile the filters into an image
        data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
        data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

        # show at display
        #print 'Data shape : ', data.shape , len(data.shape)
        img = 255 * data
        img = cv2.resize(img, (512, 512))
        img = np.array(img, dtype='uint8')
        img_c = cv2.applyColorMap(img, cv2.COLORMAP_JET)
        height, width, byteValue = img_c.shape
        byteValue = byteValue * width
        self.image = QtGui.QImage(img_c.data, width, height, byteValue, QtGui.QImage.Format_RGB888)
        self.ui.labelDisplay.setPixmap(QtGui.QPixmap(self.image))
Project: pytorch-grad-cam    Author: jacobgil    | Project Source | File Source
def show_cam_on_image(img, mask):
    heatmap = cv2.applyColorMap(np.uint8(255*mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    cam = heatmap + np.float32(img)
    cam = cam / np.max(cam)
    cv2.imwrite("cam.jpg", np.uint8(255 * cam))
Project: keras-grad-cam    Author: jacobgil    | Project Source | File Source
def grad_cam(input_model, image, category_index, layer_name):
    model = Sequential()
    model.add(input_model)

    nb_classes = 1000
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer,
                     output_shape = target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = [l for l in model.layers[0].layers if l.name == layer_name][0].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input], [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    weights = np.mean(grads_val, axis = (0, 1))
    cam = np.ones(output.shape[0 : 2], dtype = np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    cam = cv2.resize(cam, (224, 224))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    #Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
Project: fontkaruta_classifier    Author: suga93    | Project Source | File Source
def save_cam_image(img, mask, filename):
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    heatmap = cv2.applyColorMap(np.uint8(255.*mask), cv2.COLORMAP_JET)
    res = np.concatenate((img, heatmap), axis=1)
    cv2.imwrite(filename, res)
Project: esys-pbi    Author: fsxfreak    | Project Source | File Source
def generate_heatmap(self,section):

        if self.cache is None:
            logger.warning('Surface cache is not built yet.')
            return


        x,y = self.real_world_size['x'],self.real_world_size['y']
        x = max(1,int(x))
        y = max(1,int(y))

        filter_size = int(int(self.heatmap_detail * x)/2)*2 +1
        std_dev = int(filter_size /6.)
        self.heatmap = np.ones((y,x,4),dtype=np.uint8)
        all_gaze = []

        for frame_idx,c_e in enumerate(self.cache[section]):
            if c_e:
                frame_idx+=section.start
                for gp in self.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen']):
                    if gp['confidence']>=self.g_pool.min_data_confidence:
                        all_gaze.append(gp['norm_pos'])

        if not all_gaze:
            logger.warning("No gaze data on surface for heatmap found.")
            all_gaze.append((-1.,-1.))
        all_gaze = np.array(all_gaze)
        all_gaze *= [self.real_world_size['x'],self.real_world_size['y']]
        hist, xedge, yedge = np.histogram2d(all_gaze[:, 0], all_gaze[:, 1],
                                            bins=[x, y],
                                            range=[[0, self.real_world_size['x']], [0, self.real_world_size['y']]],
                                            density=False,
                                            weights=None)


        hist = np.rot90(hist)

        #smoothing..
        hist = cv2.GaussianBlur(hist,(filter_size,filter_size),std_dev)
        maxval = np.amax(hist)
        if maxval:
            scale = 255./maxval
        else:
            scale = 0

        hist = np.uint8( hist*(scale) )

        #colormapping
        c_map = cv2.applyColorMap(hist, cv2.COLORMAP_JET)

        self.heatmap[:,:,:3] = c_map
        self.heatmap[:,:,3] = 125
        self.heatmap_texture = Named_Texture()
        self.heatmap_texture.update_from_ndarray(self.heatmap)
Project: Phy-Net    Author: loliverhennigh    | Project Source | File Source
def evaluate():
  """ Eval the system"""
  with tf.Graph().as_default():
    # make inputs
    state, boundary = inputs(empty=True, shape=shape)

    # unwrap
    y_1, small_boundary_mul, small_boundary_add, x_2, y_2 = continual_unroll_template(state, boundary)

    # calc electric and magnetic fields
    electric_field_generated = lattice_to_electric(x_2, boundary)
    magnetic_field_generated = lattice_to_magnetic(x_2)
    electric_norm_generated = field_to_norm(electric_field_generated)
    magnetic_norm_generated = field_to_norm(magnetic_field_generated)
    electric_field_true = lattice_to_electric(state, boundary)
    magnetic_field_true = lattice_to_magnetic(state)
    electric_norm_true = field_to_norm(electric_field_true)
    magnetic_norm_true = field_to_norm(magnetic_field_true)

    # restore network
    variables_to_restore = tf.all_variables()
    saver = tf.train.Saver(variables_to_restore)
    sess = tf.Session()
    ckpt = tf.train.get_checkpoint_state(RESTORE_DIR)
    if ckpt and ckpt.model_checkpoint_path:
      print("restoring file from " + ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("no chekcpoint file found from " + RESTORE_DIR + ", this is an error")
      exit()

    state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, 0)
    fd = {state:state_feed_dict, boundary:boundary_feed_dict}
    y_1_g, small_boundary_mul_g, small_boundary_add_g = sess.run([y_1, small_boundary_mul, small_boundary_add], feed_dict=fd)

    # generate video
    for step in tqdm(range(FLAGS.video_length)):
      # calc generated frame compressed state
      state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, step)
      fd = {state:state_feed_dict, boundary:boundary_feed_dict, y_1:y_1_g, small_boundary_mul:small_boundary_mul_g, small_boundary_add:small_boundary_add_g}
      x_2_g, y_1_g, m_f_g, m_f_t = sess.run([x_2, y_2, magnetic_norm_generated, magnetic_norm_true],feed_dict=fd)

      frame = np.concatenate([m_f_g, m_f_t, np.abs(m_f_g - m_f_t)], 2)[0]
      frame = np.uint8(255 * frame / np.max(frame))
      frame = cv2.applyColorMap(frame[:, :, 0], 2)  # 2 == cv2.COLORMAP_JET

      # write frame to video
      video.write(frame)

    # release video
    video.release()
    cv2.destroyAllWindows()
Project: Phy-Net    Author: loliverhennigh    | Project Source | File Source
def evaluate():
  """ Eval the system"""
  with tf.Graph().as_default():
    # make inputs
    state, boundary = inputs(empty=True, shape=shape)

    # unwrap
    y_1, small_boundary_mul, small_boundary_add, x_2, y_2 = continual_unroll_template(state, boundary)

    # calc velocity
    x_2_add = add_lattice(x_2)
    state_add = add_lattice(state)
    velocity_generated = lattice_to_vel(x_2_add)
    velocity_norm_generated = vel_to_norm(velocity_generated)
    velocity_true = lattice_to_vel(state_add)
    velocity_norm_true = vel_to_norm(velocity_true)

    # restore network
    variables_to_restore = tf.all_variables()
    saver = tf.train.Saver(variables_to_restore)
    sess = tf.Session()
    ckpt = tf.train.get_checkpoint_state(RESTORE_DIR)
    if ckpt and ckpt.model_checkpoint_path:
      print("restoring file from " + ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("no chekcpoint file found from " + RESTORE_DIR + ", this is an error")
      exit()

    state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, 0)
    fd = {state:state_feed_dict, boundary:boundary_feed_dict}
    y_1_g, small_boundary_mul_g, small_boundary_add_g = sess.run([y_1, small_boundary_mul, small_boundary_add], feed_dict=fd)

    # generate video
    for step in tqdm(range(FLAGS.video_length)):
      # calc generated frame compressed state
      state_feed_dict, boundary_feed_dict = feed_dict(1, shape, FLAGS.lattice_size, 0, step)
      fd = {state:state_feed_dict, boundary:boundary_feed_dict, y_1:y_1_g, small_boundary_mul:small_boundary_mul_g, small_boundary_add:small_boundary_add_g}
      v_n_g, v_n_t, y_1_g = sess.run([velocity_norm_generated, velocity_norm_true, y_2],feed_dict=fd)

      # make frame for video
      if not d2d:
        v_n_g = v_n_g[:,10]
        v_n_t = v_n_t[:,10]
      frame = np.concatenate([v_n_g, v_n_t, np.abs(v_n_g - v_n_t)], 2)[0]
      frame = np.uint8(255 * frame/min(.25, np.max(frame)))
      frame = cv2.applyColorMap(frame[:,:,0], 2)

      # write frame to video
      video.write(frame)

    # release video
    video.release()
    cv2.destroyAllWindows()