Python tensorflow module, sub() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.sub(). Note that tf.sub was renamed tf.subtract in TensorFlow 1.0 (likewise tf.mul → tf.multiply and tf.div → tf.divide), so these samples target the pre-1.0 API.
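As a quick orientation before the project samples, here is a minimal sketch of what tf.sub computes: elementwise subtraction with NumPy-style broadcasting.

import tensorflow as tf

a = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
b = tf.constant([1.0, 1.0, 1.0])  # broadcast across both rows

diff = tf.sub(a, b)  # elementwise a - b

with tf.Session() as sess:
    print(sess.run(diff))  # [[0. 1. 2.], [3. 4. 5.]]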

Project: deep-time-reading | Author: felixduvallet
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
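Note that this first sample actually matches on Python's re.sub (regex substitution), not tensorflow.sub: it strips the tower prefix from op names for cleaner TensorBoard plots. A small illustration, assuming TOWER_NAME = 'tower' as in the CIFAR-10 multi-GPU example:

import re

TOWER_NAME = 'tower'
op_name = 'tower_0/conv1/activations'
print(re.sub('%s_[0-9]*/' % TOWER_NAME, '', op_name))  # conv1/activations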
Project: isbi2017-part3 | Author: learningtitans
def apply_image_normalization(image, normalize_per_image=0):
    if normalize_per_image == 0:
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)  # All pixels now between -1.0 and 1.0
        return image
    elif normalize_per_image == 1:
        image = tf.mul(image, 2.0)  # All pixels now between 0.0 and 2.0
        image = image - tf.reduce_mean(image, axis=[0, 1])
        # Most pixels should be between -1.0 and 1.0
        return image
    elif normalize_per_image == 2:
        image = tf.image.per_image_standardization(image)
        image = tf.mul(image, 0.4)  # This makes 98.8% of pixels between -1.0 and 1.0
        return image
    else:
        raise ValueError('invalid value for normalize_per_image: %d' % normalize_per_image)
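A brief usage sketch, assuming the image arrives as float32 in [0, 1) (the convention the mode-0 branch relies on); the random input here is purely illustrative:

import tensorflow as tf

raw = tf.random_uniform([224, 224, 3])  # stand-in for a decoded image in [0, 1)
normalized = apply_image_normalization(raw, normalize_per_image=0)

with tf.Session() as sess:
    out = sess.run(normalized)  # values now in [-1, 1)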
Project: ngraph | Author: NervanaSystems
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val})
Project: texture-networks | Author: ProofByConstruction
def style_loss(self, layers):
        activations = [self.activations_for_layer(i) for i in layers]
        gramians = [self.gramian_for_layer(x) for x in layers]
        # Slices are for style and synth image
        gramian_diffs = [
            tf.sub(
                tf.tile(tf.slice(g, [0, 0, 0], [self.num_style, -1, -1]), [self.num_synthesized - self.num_style + 1, 1, 1]),
                tf.slice(g, [self.num_style + self.num_content, 0, 0], [self.num_synthesized, -1, -1]))
            for g in gramians]
        Ns = [g.get_shape().as_list()[2] for g in gramians]
        Ms = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] for a in activations]
        scaled_diffs = [tf.square(g) for g in gramian_diffs]
        style_loss = tf.div(
            tf.add_n([tf.div(tf.reduce_sum(x), 4 * (N ** 2) * (M ** 2)) for x, N, M in zip(scaled_diffs, Ns, Ms)]),
            len(layers))
        return style_loss
Project: keras-mdn | Author: yanji84
def get_mixture_coef(output, KMIX=24, OUTPUTDIM=1):
  out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX*OUTPUTDIM], name="mixparam")
  splits = tf.split(1, 2 + OUTPUTDIM, output)
  out_pi = splits[0]
  out_sigma = splits[1]
  out_mu = tf.pack(splits[2:], axis=2)
  out_mu = tf.transpose(out_mu, [1,0,2])
  # use softmax to normalize pi into prob distribution
  max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
  out_pi = tf.sub(out_pi, max_pi)
  out_pi = tf.exp(out_pi)
  normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))
  out_pi = tf.mul(normalize_pi, out_pi)
  # use exponential to make sure sigma is positive
  out_sigma = tf.exp(out_sigma)
  return out_pi, out_sigma, out_mu
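The tf.sub(out_pi, max_pi) line is the standard max-subtraction trick: softmax is invariant to a constant shift, so subtracting the rowwise maximum before exponentiating prevents overflow. A NumPy sketch of the same computation:

import numpy as np

def stable_softmax(logits):
    # softmax(x) == softmax(x - max(x)); the shift keeps exp() in range
    shifted = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

print(stable_softmax(np.array([[1000.0, 1001.0, 1002.0]])))
# [[0.09003057 0.24472847 0.66524096]]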
Project: real_time_face_recognition | Author: shanren7
def triplet_loss(anchor, positive, negative, alpha):
  """Calculate the triplet loss according to the FaceNet paper

  Args:
    anchor: the embeddings for the anchor images.
    positive: the embeddings for the positive images.
    negative: the embeddings for the negative images.
    alpha: the margin enforced between the positive and negative distances.

  Returns:
    the triplet loss according to the FaceNet paper as a float tensor.
  """
  with tf.name_scope('triplet_loss'):
    pos_dist = tf.reduce_sum(tf.square(tf.sub(anchor, positive)), 1)  # Summing over distances in each batch
    neg_dist = tf.reduce_sum(tf.square(tf.sub(anchor, negative)), 1)

    basic_loss = tf.add(tf.sub(pos_dist,neg_dist), alpha)
    loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0, name='tripletloss')

  return loss
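In closed form this is L = mean(max(0, ||a − p||² − ||a − n||² + α)), the margin loss from the FaceNet paper. A tiny NumPy check with illustrative values:

import numpy as np

anchor   = np.array([[0.0, 0.0]])
positive = np.array([[1.0, 0.0]])  # squared distance to anchor: 1
negative = np.array([[3.0, 0.0]])  # squared distance to anchor: 9
alpha = 0.2

pos_dist = np.sum((anchor - positive) ** 2, axis=1)
neg_dist = np.sum((anchor - negative) ** 2, axis=1)
print(np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0)))
# 0.0 -- the negative is already more than alpha further away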
Project: lsdc | Author: febert
def setUp(self):
    self.base_path = os.path.join(tf.test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)

    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with tf.Graph().as_default() as g:
      x = tf.placeholder(tf.float32, name="x")
      w = tf.Variable(3.0)
      y = tf.sub(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      tf.add_to_collection("meta", "this is meta")

      with self.test_session(graph=g) as session:
        tf.initialize_all_variables().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])

      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      tf.train.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])
Project: lsdc | Author: febert
def setUp(self):
    self.base_path = os.path.join(tf.test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)

    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with tf.Graph().as_default() as g:
      x = tf.placeholder(tf.float32, name="x")
      w = tf.Variable(3.0)
      y = tf.sub(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      tf.add_to_collection("meta", "this is meta")

      with self.test_session(graph=g) as session:
        tf.global_variables_initializer().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])

      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      tf.train.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])
Project: segmentation-models | Author: desimone
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

      Args:
        image: A `Tensor` representing an image of arbitrary size.
        output_height: The height of the image after preprocessing.
        output_width: The width of the image after preprocessing.
        is_training: `True` if we're preprocessing the image for training and
          `False` otherwise.

      Returns:
        A preprocessed image.
      """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(image, output_height,
                                                 output_width)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: hyperstar | Author: nlpub
def __init__(self, x_size, y_size, w_stddev, **kwargs):
        self.x_size   = x_size
        self.y_size   = y_size
        self.w_stddev = w_stddev

        self.X = tf.placeholder(tf.float32, shape=[None, self.x_size], name='X')
        self.Y = tf.placeholder(tf.float32, shape=[None, self.y_size], name='Y')
        self.Z = tf.placeholder(tf.float32, shape=[None, self.x_size], name='Z')
        self.W = tf.Variable(tf.random_normal((self.x_size, self.y_size), stddev=self.w_stddev), name='W')

        self.Y_hat   = tf.matmul(self.X, self.W)
        self.Y_error = tf.sub(self.Y_hat, self.Y)
        self.Y_norm  = self.l2_norm(self.Y_error)

        self.Y_loss  = tf.nn.l2_loss(self.Y_norm)

        self.loss    = self.Y_loss
Project: seqrnns | Author: x75
def get_mixture_coef(output):
  out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")

  out_pi, out_sigma, out_mu = tf.split(1, 3, output)

  max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
  out_pi = tf.sub(out_pi, max_pi)

  out_pi = tf.exp(out_pi)

  normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))
  out_pi = tf.mul(normalize_pi, out_pi)

  out_sigma = tf.exp(out_sigma)

  return out_pi, out_sigma, out_mu
Project: VAE_rec | Author: RobRomijnders
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  """ 2D normal distribution
  input
  - x,mu: input vectors
  - s1,s2: standard deviances over x1 and x2
  - rho: correlation coefficient in x1-x2 plane
  """
  # eq # 24 and 25 of http://arxiv.org/abs/1308.0850
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = (tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) -
       2.0 * tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2))
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-1.0*z,2.0*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  px1x2 = tf.div(result, denom)
  return px1x2
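For reference, the density the code evaluates (eqs. 24 and 25 of Graves, arXiv:1308.0850) is

Z = \frac{(x_1-\mu_1)^2}{\sigma_1^2} + \frac{(x_2-\mu_2)^2}{\sigma_2^2} - \frac{2\rho(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2}

\mathcal{N}(x \mid \mu, \sigma, \rho) = \frac{1}{2\pi\sigma_1\sigma_2\sqrt{1-\rho^2}} \exp\!\left(\frac{-Z}{2(1-\rho^2)}\right)

with norm1 and norm2 in the code playing the roles of x_1 - \mu_1 and x_2 - \mu_2.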
Project: neural-net-matrix-factorization | Author: jstol
def __init__(self, num_users, num_items, D=10, Dprime=60, hidden_units_per_layer=50,
                 latent_normal_init_params={'mean': 0.0, 'stddev': 0.1}, model_filename='model/nnmf.ckpt'):
        self.num_users = num_users
        self.num_items = num_items
        self.D = D
        self.Dprime = Dprime
        self.hidden_units_per_layer = hidden_units_per_layer
        self.latent_normal_init_params = latent_normal_init_params
        self.model_filename = model_filename

        # Internal counter to keep track of current iteration
        self._iters = 0

        # Input
        self.user_index = tf.placeholder(tf.int32, [None])
        self.item_index = tf.placeholder(tf.int32, [None])
        self.r_target = tf.placeholder(tf.float32, [None])

        # Call methods to initialize variables and operations (to be implemented by children)
        self._init_vars()
        self._init_ops()

        # RMSE
        self.rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.r, self.r_target))))
Project: neural-net-matrix-factorization | Author: jstol
def _init_ops(self):
        # Loss
        reconstruction_loss = tf.reduce_sum(tf.square(tf.sub(self.r_target, self.r)), reduction_indices=[0])
        reg = tf.add_n([tf.reduce_sum(tf.square(self.Uprime), reduction_indices=[0,1]),
                        tf.reduce_sum(tf.square(self.U), reduction_indices=[0,1]),
                        tf.reduce_sum(tf.square(self.V), reduction_indices=[0,1]),
                        tf.reduce_sum(tf.square(self.Vprime), reduction_indices=[0,1])])
        self.loss = reconstruction_loss + (self.lam*reg)

        # Optimizer
        self.optimizer = tf.train.AdamOptimizer()
        # Optimize the MLP weights
        f_train_step = self.optimizer.minimize(self.loss, var_list=self.mlp_weights.values())
        # Then optimize the latents
        latent_train_step = self.optimizer.minimize(self.loss, var_list=[self.U, self.Uprime, self.V, self.Vprime])

        self.optimize_steps = [f_train_step, latent_train_step]
Project: reslearn | Author: mackcmillion
def _covariance_ops(image, covariance, total, mean, num_threads):
    num = tf.mul(tf.shape(image)[0], tf.shape(image)[1])
    num = tf.cast(num, tf.float32)

    mean_tiled = util.replicate_to_image_shape(image, mean)

    remainders = tf.sub(image, mean_tiled)
    remainders_stack = tf.pack([remainders, remainders, remainders])
    remainders_stack_transposed = tf.transpose(remainders_stack, [3, 1, 2, 0])
    pseudo_squares = tf.mul(remainders_stack, remainders_stack_transposed)
    sum_of_squares = tf.reduce_sum(pseudo_squares, [1, 2])

    queue = _make_queue([sum_of_squares, num], [[3, 3], []], num_threads, 'covariance_queue')

    img_sum_sq, img_num = queue.dequeue()
    covariance_update = covariance.assign_add(img_sum_sq)
    total_update = total.assign_add(img_num)

    return [covariance_update, total_update]
Project: the-neural-perspective | Author: GokuMohandas
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: Mendelssohn | Author: diggerdu
def mlp(x, y):
    op_list = list()
    op_list.append(x)

    w_input = tf.Variable(tf.truncated_normal([n_input, n_hidden], stddev=0.1))
    b_input = tf.Variable(tf.constant(0., shape=[n_hidden]))
    op_list.append(tf.matmul(op_list[-1], w_input) + b_input)
    for i in range(n_h_layer):
        w = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.1))
        b = tf.Variable(tf.constant(0., shape=[n_hidden]))
        op_list.append(tf.matmul(op_list[-1], w) + b)

    w_output = tf.Variable(tf.truncated_normal([n_hidden, n_output], stddev=0.1))
    b_output = tf.Variable(tf.constant(0., shape=[n_output]))
    op_list.append(tf.matmul(op_list[-1], w_output) + b_output)
    loss = tf.nn.l2_loss(tf.sub(op_list[-1], y))
    return op_list[-1], loss
Project: fast-neural-style | Author: coder-james
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: darkskies-challenge | Author: LiberiFatali
def preprocess_image(image_buffer):
  """Preprocess JPEG encoded bytes to 3D float Tensor."""

  # Decode the string as an RGB JPEG.
  # Note that the resulting image contains an unknown height and width
  # that is set dynamically by decode_jpeg. In other words, the height
  # and width of image is unknown at compile-time.
  image = tf.image.decode_jpeg(image_buffer, channels=3)
  # After this point, all image pixels reside in [0,1)
  # until the very end, when they're rescaled to (-1, 1).  The various
  # adjust_* ops all require this range for dtype float.
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  # Crop the central region of the image with an area containing 87.5% of
  # the original image.
  #image = tf.image.central_crop(image, central_fraction=0.875)
  # Resize the image to the original height and width.
  image = tf.expand_dims(image, 0)
  image = tf.image.resize_bilinear(image,
                                   [IMAGE_SIZE, IMAGE_SIZE],
                                   align_corners=False)
  image = tf.squeeze(image, [0])
  # Finally, rescale to [-1,1] instead of [0, 1)
  image = tf.sub(image, 0.5)
  image = tf.mul(image, 2.0)
  return image
Project: dcn.tf | Author: beopst
def extract_patches(inputs, size, offsets):

    batch_size = inputs.get_shape()[0]

    padded = tf.pad(inputs, [[0,0],[2,2],[2,2],[0,0]])
    unpacked = tf.unpack(tf.squeeze(padded))

    extra_margins = tf.constant([1,1,2,2])

    sliced_list = []
    for i in xrange(batch_size.value):

        margins = tf.random_shuffle(extra_margins)
        margins = margins[:2]
        start_pts = tf.sub(offsets[i,:],margins)
        sliced = tf.slice(unpacked[i],start_pts,size)
        sliced_list.append(sliced)

    patches = tf.pack(sliced_list)
    patches = tf.expand_dims(patches,3)

    return patches
Project: sparsecnn | Author: fkiaee
def block_shrinkage_conv(V,mu,rho):
    coef = 0.5
    V_shape = tf.shape(V); one_val = tf.constant(1.0) 
    b = tf.div(mu,rho)
    V_shape1 = tf.concat(0,[tf.mul(tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1])),tf.mul(tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1]))])
    V = tf.reshape(tf.transpose(V,perm=[2,3,0,1]),V_shape1)
    norm_V = frobenius_norm_block(V,1)  
    norm_V_per_dimension = tf.div(norm_V,tf.cast(tf.slice(V_shape1,[1],[1]),'float'))
    zero_part = tf.zeros(V_shape1)
    zero_ind = tf.greater_equal(b,norm_V_per_dimension)
    num_zero = tf.reduce_sum(tf.cast(zero_ind,'float'))
#    f4 = lambda: tf.greater_equal(tf.truediv(tf.add(tf.reduce_min(fro),tf.reduce_mean(fro)),2.0),fro)
    f4 = lambda: tf.greater_equal(tf.reduce_mean(norm_V),norm_V)
    f5 = lambda: zero_ind
    zero_ind = tf.cond(tf.greater(num_zero,tf.mul(coef,tf.cast(V_shape1[0],'float'))),f4,f5)
    G = tf.select(zero_ind,zero_part,tf.mul(tf.sub(one_val,tf.div(b,tf.reshape(norm_V,[-1,1]))),V)) 
    G_shape = tf.concat(0,[tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1]),tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1])])
    G = tf.transpose(tf.reshape(G,G_shape),perm=[2,3,0,1])
    return G,zero_ind
Project: DeepSEA | Author: momeara
def build_continuous_jaccard_distance_network(fps1, fps2):
    """
    Given two batches of fingerprints, compute the distance between
    the i'th fingerprint from each batch, using the continuous
    generalization of the Jaccard distance:

        1 - \Sum(min(x_i, y_i)) / \Sum(max(x_i, y_i))

    see (Duvenaud, NIPS 2015)

    """
    intersect = tf.reduce_sum(tf.minimum(fps1, fps2), [1], name="intersect")
    union = tf.reduce_sum(tf.maximum(fps1, fps2), [1], name="union")
    tanimoto = tf.div(intersect, union, name="tanimoto")
    jaccard = tf.sub(1.0, tanimoto, name="jaccard")
    return jaccard
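On binary fingerprints the min/max sums reduce to intersection and union counts, so this op recovers the ordinary Jaccard (Tanimoto) distance. A NumPy check:

import numpy as np

fp1 = np.array([[1.0, 1.0, 0.0, 1.0]])
fp2 = np.array([[1.0, 0.0, 0.0, 1.0]])

intersect = np.minimum(fp1, fp2).sum(axis=1)  # |A ∩ B| = 2
union = np.maximum(fp1, fp2).sum(axis=1)      # |A ∪ B| = 3
print(1.0 - intersect / union)                # [0.33333333]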
Project: DeepSEA | Author: momeara
def build_triple_score_network(distance_to_plus, distance_to_minus, model_params):
    """
    Compute the hinge loss triple loss from (Wang, CVPR2014)

    The hinge loss is a convex approximation to the 0-1 ranking error
    loss, which measures the model's violation of the ranking order
    specified in the triplet.

    The score_gap parameter favors a gap between the distance of the two image pairs
    """

    triple_score = tf.nn.relu(tf.add(
        model_params['score_gap'],
        tf.sub(distance_to_plus, distance_to_minus)),
        name="triple_score")

    return triple_score
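The op is the hinge relu(score_gap + d(a, plus) − d(a, minus)): it is zero exactly when the matching pair is closer than the mismatched pair by at least score_gap. A one-line numeric check (the gap value here is illustrative, not taken from the project):

score_gap = 0.5
d_plus, d_minus = 0.3, 0.9
print(max(0.0, score_gap + d_plus - d_minus))  # 0.0 -- ranking satisfied with margin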
Project: DP_for_FaceNet | Author: guchinoma
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.sub(anchor, positive)), 1)  # Summing over distances in each batch
        neg_dist = tf.reduce_sum(tf.square(tf.sub(anchor, negative)), 1)

        basic_loss = tf.add(tf.sub(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: DP_for_FaceNet | Author: guchinoma
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.sub(anchor, positive)), 1)  # Summing over distances in each batch
        neg_dist = tf.reduce_sum(tf.square(tf.sub(anchor, negative)), 1)

        basic_loss = tf.add(tf.sub(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: Face-Recognition | Author: aswl01
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.sub(anchor, positive)), 1)  # Summing over distances in each batch
        neg_dist = tf.reduce_sum(tf.square(tf.sub(anchor, negative)), 1)

        basic_loss = tf.add(tf.sub(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: ternarynet | Author: czhu95
def class_balanced_binary_class_cross_entropy(pred, label, name='cross_entropy_loss'):
    """
    The class-balanced cross entropy loss for binary classification,
    as in `Holistically-Nested Edge Detection
    <http://arxiv.org/abs/1504.06375>`_.

    :param pred: size: b x ANYTHING. the predictions in [0,1].
    :param label: size: b x ANYTHING. the ground truth in {0,1}.
    :returns: class-balanced binary classification cross entropy loss
    """
    z = batch_flatten(pred)
    y = tf.cast(batch_flatten(label), tf.float32)

    count_neg = tf.reduce_sum(1. - y)
    count_pos = tf.reduce_sum(y)
    beta = count_neg / (count_neg + count_pos)

    eps = 1e-8
    loss_pos = -beta * tf.reduce_mean(y * tf.log(tf.abs(z) + eps), 1)
    loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(tf.abs(1. - z) + eps), 1)
    cost = tf.sub(loss_pos, loss_neg)
    cost = tf.reduce_mean(cost, name=name)
    return cost
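Written out, with \beta the fraction of negative labels, the cost is

\mathcal{L} = -\beta\,\overline{y \log z} \;-\; (1-\beta)\,\overline{(1-y)\log(1-z)}

tf.sub(loss_pos, loss_neg) supplies the minus sign on the second term, since loss_neg is defined above without it.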
Project: dqa-net | Author: allenai
def softmax_with_base(shape, base_untiled, x, mask=None, name='sig'):
    if mask is not None:
        x += VERY_SMALL_NUMBER * (1.0 - mask)
    base_shape = shape[:-1] + [1]
    for _ in shape:
        base_untiled = tf.expand_dims(base_untiled, -1)
    base = tf.tile(base_untiled, base_shape)

    c_shape = shape[:-1] + [shape[-1] + 1]
    c = tf.concat(len(shape)-1, [base, x])
    c_flat = tf.reshape(c, [reduce(mul, shape[:-1], 1), c_shape[-1]])
    p_flat = tf.nn.softmax(c_flat)
    p_cat = tf.reshape(p_flat, c_shape)
    s_aug = tf.slice(p_cat, [0 for _ in shape], [i for i in shape[:-1]] + [1])
    s = tf.squeeze(s_aug, [len(shape)-1])
    sig = tf.sub(1.0, s, name="sig")
    p = tf.slice(p_cat, [0 for _ in shape[:-1]] + [1], shape)
    return sig, p
Project: the-neural-perspective | Author: johnsonc
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.sub(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: irgan | Author: geek-ai
def __init__(self, sequence_length, batch_size, vocab_size, embedding_size,
             filter_sizes, num_filters, dropout_keep_prob=1.0, l2_reg_lambda=0.0,
             learning_rate=1e-2, paras=None, embeddings=None, loss="pair",
             trainable=True):

        QACNN.__init__(self, sequence_length, batch_size, vocab_size, embedding_size,
                       filter_sizes, num_filters, dropout_keep_prob=dropout_keep_prob,
                       l2_reg_lambda=l2_reg_lambda, paras=paras,
                       learning_rate=learning_rate, embeddings=embeddings,
                       loss=loss, trainable=trainable)
        self.model_type = "Dis"


        with tf.name_scope("output"):

            self.losses = tf.maximum(0.0, tf.sub(0.05, tf.sub(self.score12, self.score13)))
            self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * self.l2_loss

            self.reward = 2.0 * (tf.sigmoid(tf.sub(0.05, tf.sub(self.score12, self.score13))) - 0.5)  # no log
            self.positive = tf.reduce_mean(self.score12)
            self.negative = tf.reduce_mean(self.score13)

            self.correct = tf.equal(0.0, self.losses)
            self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")


        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads_and_vars]
        self.train_op = optimizer.apply_gradients(capped_gvs, global_step=self.global_step)
Project: MusicGenerator | Author: Conchylicultor
def __call__(X):
        """ Predict the output from prev and scale the result on [-1, 1]
        Use sigmoid activation
        Args:
            X (tf.Tensor): the input
        Return:
            tf.Ops: the activate_and_scale operator
        """
        # TODO: Use tanh instead ? tanh=2*sigm(2*x)-1
        with tf.name_scope('activate_and_scale'):
            return tf.sub(tf.mul(2.0, tf.nn.sigmoid(X)), 1.0)  # x_{i} = 2*sigmoid(y_{i-1}) - 1
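The TODO in the comment is an exact identity: 2·sigmoid(x) − 1 = tanh(x/2), so the op could equivalently be tf.tanh on a halved input. A quick NumPy confirmation:

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
sigmoid = 1.0 / (1.0 + np.exp(-x))
print(np.allclose(2.0 * sigmoid - 1.0, np.tanh(x / 2.0)))  # True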
Project: rlflow | Author: tpbarron
def stddev(x):
    x = tf.to_float(x)
    return tf.sqrt(tf.reduce_mean(tf.square(
        tf.abs(tf.sub(x, tf.fill(x.get_shape(), tf.reduce_mean(x)))))))
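Fill/abs scaffolding aside, this is the plain population standard deviation, sqrt(mean((x − mean(x))²)); the tf.abs is redundant since the result is squared anyway. A NumPy equivalent for comparison:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
print(np.sqrt(np.mean((x - x.mean()) ** 2)))  # ~1.11803
print(x.std())                                # identical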
Project: isbi2017-part3 | Author: learningtitans
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.

  If height and width are specified it would output an image with that size by
  applying resize_bilinear.

  If central_fraction is specified it would crop the central fraction of the
  input image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
      the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
      image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
      # Resize the image to the specified height and width.
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width],
                                       align_corners=False)
      image = tf.squeeze(image, [0])
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image
Project: tensorflow_homographynet | Author: linjian93
def main(_):
    test_img_list = load_data(dir_test)
    mean_var = np.load('log/log_mycnn/mean_var_out.npz')

    x1 = tf.placeholder(tf.float32, [None, 128, 128, 2])  # data
    x2 = tf.placeholder(tf.float32, [None, 8])  # label
    x4 = tf.placeholder(tf.float32, [])  # dropout
    net = Mycnn(x1, x4, bn_in=mean_var.f.arr_0)
    fc2 = net.out

    loss = tf.reduce_sum(tf.square(tf.sub(fc2, x2))) / 2 / batch_size

    # gpu configuration
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # gpu_opinions = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)

    saver = tf.train.Saver(max_to_keep=None)

    with tf.Session(config=tf_config) as sess:
        saver.restore(sess, dir_load)
        test_model = DataSet(test_img_list)
        loss_total = []
        for i in range(iter_max):
            x_batch_test, y_batch_test, h1_test, img1, img2 = test_model.next_batch()
            np.savetxt(((dir_save + '/h1_%d.txt') % i), h1_test)
            np.savetxt(((dir_save + '/label_%d.txt') % i), y_batch_test)
            cv2.imwrite(((dir_save + '/image_%d_1.jpg') % i), img1)
            cv2.imwrite(((dir_save + '/image_%d_2.jpg') % i), img2)

            pre, average_loss = sess.run([fc2, loss], feed_dict={x1: x_batch_test, x2: y_batch_test, x4: 1.0})

            np.savetxt(((dir_save + '/predict_%d.txt') % i), pre)
            loss_total.append(average_loss)

            print ('iter %05d, test loss = %.5f' % ((i+1), average_loss))

        np.savetxt((dir_save + '/loss.txt'), loss_total)
Project: CDBN-for-Tensorflow | Author: shygiants
def __pool(self):
        # Pooling
        with tf.name_scope('pooling') as _:
            self.pooled_prob = tf.sub(
                1., tf.div(1., 1. + tf.mul(self.__avg_pool(tf.exp(self.signal)),
                                           self.pool_size * self.pool_size)),
                name='pooled_prob')
            self.pooled = self.__sample(self.pooled_prob, 'pooled')
Project: CDBN-for-Tensorflow | Author: shygiants
def __gradient_ascent(self):
        # Gradient ascent
        with tf.name_scope('gradient') as _:
            self.grad_bias = tf.mul(tf.reduce_mean(self.hid_prob0 - self.hid_prob1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_bias')
            self.grad_cias = tf.mul(tf.reduce_mean(self.vis_0 - self.vis_1, [0, 1, 2]),
                                    self.learning_rate * self.batch_size, name='grad_cias')

            # TODO: Is there any method to calculate batch-elementwise convolution?
            temp_grad_weights = tf.zeros(self.weight_shape)
            hid_filter0 = tf.reverse(self.hid_prob0, [False, True, True, False])
            hid_filter1 = tf.reverse(self.hid_prob1, [False, True, True, False])
            for idx in range(0, self.batch_size):
                hid0_ith = self.__get_ith_hid_4d(hid_filter0, idx)
                hid1_ith = self.__get_ith_hid_4d(hid_filter1, idx)

                positive = [0] * self.depth
                negative = [0] * self.depth
                one_ch_conv_shape = [self.width, self.height, 1, self.num_features]
                for jdx in range(0, self.depth):
                    positive[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_0, idx, jdx), hid0_ith),
                                               one_ch_conv_shape)
                    negative[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_1, idx, jdx), hid1_ith),
                                               one_ch_conv_shape)
                positive = tf.concat(2, positive)
                negative = tf.concat(2, negative)
                temp_grad_weights = tf.add(temp_grad_weights,
                                           tf.slice(tf.sub(positive, negative), [0, 0, 0, 0], self.weight_shape))

            self.grad_weights = tf.mul(temp_grad_weights, self.learning_rate / (self.width * self.height))
        self.gradient_ascent = [self.weights.assign_add(self.grad_weights),
                                self.bias.assign_add(self.grad_bias),
                                self.cias.assign_add(self.grad_cias)]
Project: dlbench | Author: hclhkbu
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
    """Decode and preprocess one image for evaluation or training.

  Args:
    image_buffer: JPEG encoded string Tensor
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    train: boolean
    thread_id: integer indicating preprocessing thread

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
    if bbox is None:
        raise ValueError('Please supply a bounding box.')

    image = decode_jpeg(image_buffer)
    height = FLAGS.input_size
    width = FLAGS.input_size

    if train:
        image = distort_image(image, height, width, bbox, thread_id)
    else:
        image = eval_image(image, height, width)

    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image
Project: TensorFlow-Machine-Learning-Cookbook | Author: PacktPublishing
def custom_polynomial(x_val):
    # Return 3x^2 - x + 10
    return tf.sub(3 * tf.square(x_val), x_val) + 10
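A usage sketch: at x = 5 the polynomial evaluates to 3·25 − 5 + 10 = 80 (pre-1.0 TF session API assumed):

import tensorflow as tf

with tf.Session() as sess:
    print(sess.run(custom_polynomial(tf.constant(5.0))))  # 80.0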
Project: Deep-learning-Colorization-for-visual-media | Author: OmarSayedMostafa
def Train():
    global AbColores_values
    global CurrentBatch_indx
    global GreyImages_Batch
    global EpochsNum
    global ExamplesNum
    global Batch_size
    Input_images = tf.placeholder(dtype=tf.float32,shape=[None,224,224,1],name="X_inputs")
    Ab_Labels_tensor = tf.placeholder(dtype=tf.float32,shape=[None,224,224,2],name="Labels_inputs")
    Prediction = TriainModel(Input_images) 
    Colorization_MSE = tf.reduce_mean(Frobenius_Norm(tf.sub(Prediction, Ab_Labels_tensor)))
    Optmizer = tf.train.AdamOptimizer().minimize(Colorization_MSE)
    #sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver = tf.train.import_meta_graph('Model Directory/our_model.meta')
    saver.restore(sess, 'Model Directory/our_model')
    PrevLoss = 0
    for epoch in range(EpochsNum):
        epoch_loss = 0
        CurrentBatch_indx = 1
        for i in range(int(ExamplesNum / Batch_size)):#Over batches
           print("Batch Num ",i + 1)
           ReadNextBatch()
           a, c = sess.run([Optmizer,Colorization_MSE],feed_dict={Input_images:GreyImages_Batch,Ab_Labels_tensor:AbColores_values})
           epoch_loss += c
        print("epoch: ",epoch + 1, ",Loss: ",epoch_loss,", Diff:",PrevLoss - epoch_loss)
        PrevLoss = epoch_loss

    saver.save(sess, 'Model Directory/our_model',write_meta_graph=False)
Project: TF-phrasecut-public | Author: chenxi116
def dsc_loss(scores, labels):
    scores = tf.sigmoid(scores)
    inter = tf.scalar_mul(2., tf.reduce_sum(tf.multiply(scores, labels), [1, 2, 3]))
    union = tf.add(tf.reduce_sum(scores, [1, 2, 3]), tf.reduce_sum(labels, [1, 2, 3]))
    dsc_loss = tf.reduce_mean(tf.sub(1., tf.div(inter, union)))

    return dsc_loss
Project: TF-phrasecut-public | Author: chenxi116
def iou_loss(scores, labels):
    scores = tf.sigmoid(scores)
    inter = tf.reduce_sum(tf.multiply(scores, labels), [1, 2, 3])
    union = tf.add(tf.reduce_sum(scores, [1, 2, 3]), tf.reduce_sum(labels, [1, 2, 3]))
    union = tf.sub(union, inter)
    iou_loss = tf.reduce_mean(tf.sub(1., tf.div(inter, union)))

    return iou_loss
Project: FeatureSqueezing | Author: QData
def tf_model_eval_distance(sess, x, model1, model2, X_test):
    """
    Compute the L1 distance between prediction of original and squeezed data.
    :param sess: TF session to use when training the graph
    :param x: input placeholder
    :param model1: model output original predictions
    :param model2: model output squeezed predictions
    :param X_test: numpy array with training inputs
    :return: a float vector with the distance value
    """
    # Define symbolic accuracy
    # acc_value = keras.metrics.categorical_accuracy(y, model)

    l2_diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(model1, model2)),
                                    axis=1))
    l_inf_diff = tf.reduce_max(tf.abs(tf.sub(model1, model2)), axis=1)
    l1_diff = tf.reduce_sum(tf.abs(tf.sub(model1, model2)), axis=1)

    l1_dist_vec = np.zeros((len(X_test)))

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test), start + FLAGS.batch_size)
            cur_batch_size = end - start

            l1_dist_vec[start:end] = l1_diff.eval(feed_dict={x: X_test[start:end],keras.backend.learning_phase(): 0})

        assert end >= len(X_test)
    return l1_dist_vec
Project: texture-networks | Author: ProofByConstruction
def spatial_batch_norm(input_layer, name='spatial_batch_norm'):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    num_channels = input_layer.get_shape().as_list()[3]  # TODO: Clean this up
    scale = tf.Variable(tf.random_uniform([num_channels]), name='scale')  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([num_channels]), name='offset')
    return_val = tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
    return return_val
Project: texture-networks | Author: ProofByConstruction
def content_loss(self, layers):
        activations = [self.activations_for_layer(i) for i in layers]
        activation_diffs = [
            tf.sub(
                tf.tile(tf.slice(a, [self.num_style, 0, 0, 0], [self.num_content, -1, -1, -1]), [self.num_synthesized - self.num_content + 1, 1, 1, 1]),
                tf.slice(a, [self.num_style + self.num_content, 0, 0, 0], [self.num_content, -1, -1, -1]))
            for a in activations]
        # This normalizer is in JCJohnson's paper, but not Gatys' I think?
        Ns = [a.get_shape().as_list()[1] * a.get_shape().as_list()[2] * a.get_shape().as_list()[3] for a in activations]
        content_loss = tf.div(tf.add_n([tf.div(tf.reduce_sum(tf.square(a)), n) for a, n in zip(activation_diffs, Ns)]), 2.0)
        return content_loss
Project: SSD_tensorflow_VOC | Author: LevinJ
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
    """Prepare one image for evaluation.

    If height and width are specified it would output an image with that size by
    applying resize_bilinear.

    If central_fraction is specified it would crop the central fraction of the
    input image.

    Args:
        image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
            [0, 1], otherwise it would be converted to tf.float32 assuming that the range
            is [0, MAX], where MAX is the largest positive representable number for
            the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
        height: integer
        width: integer
        central_fraction: Optional Float, fraction of the image to crop.
        scope: Optional scope for name_scope.
    Returns:
        3-D float Tensor of prepared image.
    """
    with tf.name_scope(scope, 'eval_image', [image, height, width]):
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Crop the central region of the image with an area containing 87.5% of
        # the original image.
        if central_fraction:
            image = tf.image.central_crop(image, central_fraction=central_fraction)

        if height and width:
            # Resize the image to the specified height and width.
            image = tf.expand_dims(image, 0)
            image = tf.image.resize_bilinear(image, [height, width],
                                             align_corners=False)
            image = tf.squeeze(image, [0])
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        return image
Project: keras-mdn | Author: yanji84
def tf_normal(y, mu, sigma):
  oneDivSqrtTwoPI = 1 / math.sqrt(2*math.pi)
  result = tf.sub(y, mu)
  result = tf.transpose(result, [2,1,0])
  result = tf.mul(result,tf.inv(sigma + 1e-8))
  result = -tf.square(result)/2
  result = tf.mul(tf.exp(result),tf.inv(sigma + 1e-8))*oneDivSqrtTwoPI
  result = tf.reduce_prod(result, reduction_indices=[0])
  return result
Project: lsdc | Author: febert
def test_logging_trainable(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      var = tf.Variable(tf.constant(42.0), name='foo')
      var.initializer.run()
      cof = tf.constant(1.0)
      loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
      train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
      tf.get_default_session().run(train_step)
      self._run_monitor(learn.monitors.LoggingTrainable('foo'))
      self.assertRegexpMatches(str(self.logged_message), var.name)
Project: lsdc | Author: febert
def test_logging_trainable(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      var = tf.Variable(tf.constant(42.0), name='foo')
      var.initializer.run()
      cof = tf.constant(1.0)
      loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
      train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
      tf.get_default_session().run(train_step)
      self._run_monitor(learn.monitors.LoggingTrainable('foo'))
      self.assertRegexpMatches(str(self.logged_message), var.name)
Project: lsdc | Author: febert
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
Project: TF-Examples | Author: CharlesShang
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
    """Decode and preprocess one image for evaluation or training.

  Args:
    image_buffer: JPEG encoded string Tensor
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    train: boolean
    thread_id: integer indicating preprocessing thread

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
    if bbox is None:
        raise ValueError('Please supply a bounding box.')

    image = decode_jpeg(image_buffer)
    height = FLAGS.input_size
    width = FLAGS.input_size

    if train:
        image = distort_image(image, height, width, bbox, thread_id)
    else:
        image = eval_image(image, height, width)

    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    return image