Python tensorflow module: sign() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.sign().
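
As a quick orientation before the project examples, here is a minimal sketch (assuming the TensorFlow 1.x session API used by the snippets below) of what tf.sign computes: an elementwise -1, 0, or +1 depending on the sign of each input value.

import tensorflow as tf

x = tf.constant([-3.2, 0.0, 5.7])
with tf.Session() as sess:
    print(sess.run(tf.sign(x)))  # [-1.  0.  1.]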

Project: vae-npvc    Author: JeremyCCHsu
def mu_law_encode_nonlinear(audio, quantization_channels=256):
    '''
    Compress the waveform amplitudes using the mu-law non-linearity.
    NOTE: this applies only the non-linear companding step, as opposed to
          full mu-law quantization.
    '''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.multiply(tf.sign(audio), magnitude, name='mulaw')
        # Quantize signal to the specified number of levels.
        # return tf.to_int32((signal + 1) / 2 * mu + 0.5)
        return signal
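A minimal usage sketch for the encoder above (the waveform values and session setup are illustrative, not from the project):

import tensorflow as tf

waveform = tf.constant([-0.5, 0.0, 0.25, 1.0])
encoded = mu_law_encode_nonlinear(waveform)  # companded values, still in [-1, 1]
with tf.Session() as sess:
    print(sess.run(encoded))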
Project: querysum    Author: helmertz
def _score(self, prev_decoder_state, prev_embedding):
        # Returns scores in a tensor of shape [batch_size, input_sequence_length]

        if self.mode == 'decode':
            query_part = self.query_attention_partial_score_placeholder
            encoder_part = self.encoder_state_attention_partial_scores_placeholder
        else:
            query_part = self.query_attention_partial_score
            encoder_part = self.encoder_state_attention_partial_scores

        embedding_part = tf.matmul(prev_embedding, self.attention_w_e)

        output = tf.matmul(prev_decoder_state,
                           self.attention_w) + embedding_part + query_part + encoder_part + self.attention_b
        output = tf.tanh(output)
        output = tf.reduce_sum(self.attention_v * output, axis=2)
        output = tf.transpose(output, [1, 0])

        # Handle input document padding by giving a large penalty, eliminating it from the weighted average
        padding_penalty = -1e20 * tf.to_float(1 - tf.sign(self.documents_placeholder))
        masked = output + padding_penalty

        return masked
Project: onsager_deep_learning    Author: mborgerding
def shrink_soft_threshold(r,rvar,theta):
    """
    soft threshold function
        y = sign(x) * max(0, abs(x) - theta[0]*sqrt(rvar)) * scaling
    where scaling is theta[1] (default = 1);
    in other words, if theta has length 1, the standard (unscaled)
    soft threshold is applied.
    """
    if len(theta.get_shape())>0 and theta.get_shape() != (1,):
        lam = theta[0] * tf.sqrt(rvar)
        scale=theta[1]
    else:
        lam  = theta * tf.sqrt(rvar)
        scale = None
    lam = tf.maximum(lam,0)
    arml = tf.abs(r) - lam
    xhat = tf.sign(r) * tf.maximum(arml,0)
    dxdr = tf.reduce_mean(tf.to_float(arml>0),0)
    if scale is not None:
        xhat = xhat*scale
        dxdr = dxdr*scale
    return (xhat,dxdr)
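A quick numeric sanity check of the function above against the closed form y = sign(r) * max(0, |r| - theta*sqrt(rvar)); the inputs are illustrative:

import tensorflow as tf

r = tf.constant([-2.0, -0.5, 0.5, 2.0])
xhat, dxdr = shrink_soft_threshold(r, rvar=tf.constant(1.0), theta=tf.constant(1.0))
with tf.Session() as sess:
    print(sess.run(xhat))  # [-1.  0.  0.  1.] -- entries inside [-1, 1] shrink to 0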
Project: onsager_deep_learning    Author: mborgerding
def shrink_spline(r,rvar,theta):
    """ Spline-based shrinkage function
    """
    scale = theta[0]*tf.sqrt(rvar)
    rs = tf.sign(r)
    ar = tf.abs(r/scale)
    ar2 = tf.square(ar)
    ar3 = ar*ar2
    reg1 = tf.to_float(ar<1)
    reg2 = tf.to_float(ar<2)-reg1
    ar_m2 = 2-ar
    ar_m2_p2 = tf.square(ar_m2)
    ar_m2_p3 = ar_m2*ar_m2_p2
    beta3 = ( (2./3 - ar2  + .5*ar3)*reg1 + (1./6*(ar_m2_p3))*reg2 )
    xhat = r*(theta[1] + theta[2]*beta3)
    return (xhat,auto_gradients(xhat,r))
Project: cleverhans    Author: tensorflow
def attack_single_step(self, x, eta, y):
        """
        Given the original image and the perturbation computed so far, computes
        a new perturbation.

        :param x: A tensor with the original input.
        :param eta: A tensor the same shape as x that holds the perturbation.
        :param y: A tensor with the target labels or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import model_loss, clip_eta

        adv_x = x + eta
        preds = self.model.get_probs(adv_x)
        loss = model_loss(y, preds)
        if self.targeted:
            loss = -loss
        grad, = tf.gradients(loss, adv_x)
        scaled_signed_grad = self.eps_iter * tf.sign(grad)
        adv_x = adv_x + scaled_signed_grad
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        eta = adv_x - x
        eta = clip_eta(eta, self.ord, self.eps)
        return x, eta
Project: magenta    Author: tensorflow
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded int8 data.
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out
Project: NER-LSTM-CRF    Author: liu-nlper
def compute_loss(self):
        """
        Compute the loss.

        Return:
            loss: scalar
        """
        if not self._use_crf:
            labels = tf.reshape(
                tf.contrib.layers.one_hot_encoding(
                    tf.reshape(self.input_label_ph, [-1]), num_classes=self._nb_classes),
                shape=[-1, self._sequence_length, self._nb_classes])
            cross_entropy = -tf.reduce_sum(labels * tf.log(self.logits), axis=2)
            mask = tf.sign(tf.reduce_max(tf.abs(labels), axis=2))
            cross_entropy_masked = tf.reduce_sum(
                cross_entropy*mask, axis=1) / tf.cast(self.sequence_actual_length, tf.float32)
            return tf.reduce_mean(cross_entropy_masked)
        else:
            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
                self.logits, self.input_label_ph, self.sequence_actual_length)
            return tf.reduce_mean(-log_likelihood)
Project: zhusuan    Author: thu-ml
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples
Project: hyperchamber    Author: 255BITS
def create_rnn(config, x, scope='rnn'):
    with tf.variable_scope(scope):
        memory=config['rnn_size']
        cell = rnn_cell.BasicLSTMCell(memory)
        state = cell.zero_state(batch_size=config['batch_size'], dtype=tf.float32)
        x, state = rnn.rnn(cell, [tf.cast(x,tf.float32)], initial_state=state, dtype=tf.float32)
        x = x[-1]
        #w = tf.get_variable('w', [hc.get('rnn_size'),4])
        #b = tf.get_variable('b', [4])
        #x = tf.nn.xw_plus_b(x, w, b)
        x=tf.sign(x)
        return x, state

# At each step of the graph we have:
# x is [BATCH_SIZE, 4], where the data is a one-hot binary vector of the form:
# [start_token end_token a b]
#
# y is [BATCH_SIZE, 4], a binary vector giving the chance each character is correct
#
Project: aboleth    Author: data61
def __init__(self, n_features, lenscale=1.0, p=1, variational=False,
                 lenscale_posterior=None):
        """Create an instance of an arc cosine kernel layer."""
        # Setup random weights
        if variational:
            kern = RBFVariational(lenscale=lenscale,
                                  lenscale_posterior=lenscale_posterior)
        else:
            kern = RBF(lenscale=lenscale)
        super().__init__(n_features=n_features, kernel=kern)

        # Kernel order
        assert isinstance(p, int) and p >= 0
        if p == 0:
            self.pfunc = tf.sign
        elif p == 1:
            self.pfunc = lambda x: x
        else:
            self.pfunc = lambda x: tf.pow(x, p)
Project: YellowFin    Author: JianGoForIt
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1]).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._grad_var) ), [self._grad_var,]),
    #   tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._grad_var) ), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x
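For reference, here is the algebra the comments describe, reconstructed from the code (a sketch, not quoted from the YellowFin paper): with q = -p, the depressed cubic is solved by Vieta's substitution.

\[
y^3 + p\,y = q, \qquad p = \frac{D^2 h_{\min}^2}{2C}, \qquad q = -p,
\]
\[
w^3 = \frac{q - \sqrt{q^2 + 4p^3/27}}{2}, \qquad
y = w - \frac{p}{3w}, \qquad x = y + 1, \qquad \mu = x^2 .
\]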
Project: terngrad    Author: wenwei202
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a,4) # assumes the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
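A hypothetical inverse of the packing above, for illustration only (terngrad's actual decoder may differ): each byte holds four base-4 digits that map back to {-1, 0, 1}.

import tensorflow as tf

def ternary_decoder_sketch(encoded, original_size):
  """Unpack bytes produced by ternary_encoder back into sign values."""
  e = tf.cast(encoded, tf.int32)
  d1 = tf.mod(e, 4)        # first quarter of the flattened signs
  d2 = tf.mod(e // 4, 4)   # second quarter
  d3 = tf.mod(e // 16, 4)  # third quarter
  d4 = e // 64             # fourth quarter
  a = tf.concat([d1, d2, d3, d4], 0)
  return tf.cast(a[:original_size], tf.float32) - 1.0  # shift 0,1,2 back to -1,0,1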
Project: terngrad    Author: wenwei202
def stochastical_binarize_gradients(grads_and_vars, scalers):
  """Stochastically binarize gradients."""
  gradients, variables = zip(*grads_and_vars)
  binarized_gradients = []
  for gradient, scaler in zip(gradients, scalers):
    if gradient is None:
      binarized_gradients.append(None)
      continue
    if isinstance(gradient, tf.IndexedSlices):
      gradient_shape = gradient.dense_shape
    else:
      gradient_shape = gradient.get_shape()

    zeros = tf.zeros(gradient_shape)
    abs_gradient = tf.abs(gradient)
    sign_gradient = tf.sign( gradient )
    rnd_sample = tf.random_uniform(gradient_shape,0,scaler)
    where_cond = tf.less(rnd_sample, abs_gradient)
    binarized_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                               lambda: gradient,
                               lambda: tf.where(where_cond, sign_gradient * scaler, zeros))

    binarized_gradients.append(binarized_gradient)
  return list(zip(binarized_gradients, variables))
Project: terngrad    Author: wenwei202
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a,4) # assumes the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
Project: image-text-matching    Author: llltttppp
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        tf.summary.scalar('loss/sparse_loss',self.sparse_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Project: image-text-matching    Author: llltttppp
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag)*tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: image-text-matching    Author: llltttppp
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Project: image-text-matching    Author: llltttppp
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)), K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d)*tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: image-text-matching    Author: llltttppp
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: image-text-matching    Author: llltttppp
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/softmax_loss',self.softmaxloss)
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Project: image-text-matching    Author: llltttppp
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: image-text-matching    Author: llltttppp
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Project: image-text-matching    Author: llltttppp
def top_K_loss_margin(self,sentence,image,K=50,margin=0.2):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = 1-tf.sigmoid(sim_matrix)
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        dd = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * dd *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(-tf.transpose(1.0 * dd*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = -tf.log(1-positive+1e-12)-tf.log(-sen_loss_K+1e-12)
        image_center_loss = -tf.log(1-positive+1e-12)-tf.log(-im_loss_K+1e-12)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: FeatureSqueezing    Author: QData
def get_gradient_sign_tf(x, predictions):
    """
    TensorFlow implementation of calculating the signed gradient with respect to x.
    :param x: the input placeholder
    :param predictions: the model's output tensor
    :return: a tensor for the adversarial example
    """

    # Compute loss
    y = tf.to_float(tf.equal(predictions, tf.reduce_max(predictions, 1, keep_dims=True)))
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    loss = utils_tf.tf_model_loss(y, predictions, mean=False)

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)

    # Take sign of gradient
    signed_grad = tf.sign(grad)
    signed_grad = tf.stop_gradient(signed_grad)
    return signed_grad
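A hedged sketch of how the signed gradient is typically consumed, the FGSM perturbation step (the placeholder shape, the model function, and eps are hypothetical, not from the project):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])  # hypothetical input
predictions = model(x)  # hypothetical model forward pass
eps = 0.3
signed_grad = get_gradient_sign_tf(x, predictions)
adv_x = tf.clip_by_value(x + eps * signed_grad, 0.0, 1.0)  # adversarial examples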
Project: tefla    Author: openAGI
def _quantize(x, params, randomize=True):
    """Quantize x according to params, optionally randomizing the rounding."""
    if not params.quantize:
        return x

    if not randomize:
        return tf.bitcast(
            tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

    abs_x = tf.abs(x)
    sign_x = tf.sign(x)
    y = abs_x / params.quantization_scale
    y = tf.floor(y + tf.random_uniform(tf.shape(x)))
    y = tf.minimum(y, tf.int16.max) * sign_x
    q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
    return q
Project: cfrnet    Author: clinicalml
def lindisc(X,p,t):
    ''' Linear MMD '''

    it = tf.where(t>0)[:,0]
    ic = tf.where(t<1)[:,0]

    Xc = tf.gather(X,ic)
    Xt = tf.gather(X,it)

    mean_control = tf.reduce_mean(Xc,reduction_indices=0)
    mean_treated = tf.reduce_mean(Xt,reduction_indices=0)

    c = tf.square(2*p-1)*0.25
    f = tf.sign(p-0.5)

    mmd = tf.reduce_sum(tf.square(p*mean_treated - (1-p)*mean_control))
    mmd = f*(p-0.5) + safe_sqrt(c + mmd)

    return mmd
Project: RNNVis    Author: myaooo
def sequence_length(sequence):
    """
    Get the length tensor of a batched sequence.
        When the input is an embedding (a 3D tensor), the padded part should be filled with 0.s;
        when the input is word ids (a 2D tensor), the padded part should be filled with -1s.
    :param sequence: a Tensor of shape [batch_size, max_length(, embedding_size)]
    :return: a 1D Tensor of shape (batch_size,) giving the length of each sequence
    """
    embedding = len(sequence.get_shape()) == 3
    if embedding:
        # zeros will be 0., others will be 1.
        used = tf.sign(tf.reduce_max(tf.abs(sequence), axis=2))
    else:
        # -1 will be 0, others will be 1.
        used = tf.sign(sequence+1)
    length = tf.reduce_sum(used, axis=1)
    length = tf.cast(length, tf.int32)
    return length
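A small illustrative check of the 2D word-id branch, where padding is -1:

import tensorflow as tf

ids = tf.constant([[4, 7, 2, -1, -1],
                   [9, 3, -1, -1, -1]])
with tf.Session() as sess:
    print(sess.run(sequence_length(ids)))  # [3 2]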
Project: YOLOv1_tensorflow    Author: Nielsyang
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res
Project: YOLOv1_tensorflow    Author: Nielsyang
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res
Project: YOLOv1_tensorflow    Author: Nielsyang
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res
Project: YOLOv1_tensorflow    Author: Nielsyang
def loss_func_yolo(output, exp):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(exp[i][j+2]) * (tf.square(output[i][j] - exp[i][j]) + tf.square(output[i][j+1]-exp[i][j+1]) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+2])) - tf.sqrt(exp[i][j+2])) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+3])) - tf.sqrt(exp[i][j+3])))

      res += tf.sign(exp[i][j+2]) * (tf.square(output[i][j+4] - exp[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(exp[i][j])) * (tf.square(output[i][j+4] - exp[i][j+4]))

      res += COORD_W * tf.sign(exp[i][j+7]) * (tf.square(output[i][j+5] - exp[i][j+5]) + tf.square(output[i][j+6]-exp[i][j+6]) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+7])) - tf.sqrt(exp[i][j+7])) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+8])) - tf.sqrt(exp[i][j+8])))

      res += tf.sign(exp[i][j+7]) * (tf.square(output[i][j+9] - exp[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(exp[i][j+5])) * (tf.square(output[i][j+9] - exp[i][j+9]))

      res += tf.sign(exp[i][j+7]) * (tf.square(output[i][j+10] - exp[i][j+10]) + tf.square(output[i][j+11] - exp[i][j+11]))

  return res
Project: FeatureSqueezing    Author: uvasrg
def get_gradient_sign_tf(x, predictions):
    """
    TensorFlow implementation of calculating the signed gradient with respect to x.
    :param x: the input placeholder
    :param predictions: the model's output tensor
    :return: a tensor for the adversarial example
    """

    # Compute loss
    y = tf.to_float(tf.equal(predictions, tf.reduce_max(predictions, 1, keep_dims=True)))
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    loss = utils_tf.tf_model_loss(y, predictions, mean=False)

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)

    # Take sign of gradient
    signed_grad = tf.sign(grad)
    signed_grad = tf.stop_gradient(signed_grad)
    return signed_grad
Project: tensor2tensor    Author: tensorflow
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
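A hypothetical inverse for illustration (not quoted from the project): bitcast the float16 payload back to int16 and rescale; the rounding noise itself is not recoverable.

import tensorflow as tf

def dequantize_sketch(q, params):
  """Approximate inverse of _quantize above."""
  y = tf.cast(tf.bitcast(q, tf.int16), tf.float32)
  return y * params.quantization_scale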
Project: DeeplearningForTextClassification    Author: zldeng
def getSequenceRealLength(sequences):
    '''
    Get the real length of each sequence in the batch.
    input: a tensor of shape [a_size, b_size, c_size]; padded steps along the b_size axis are all-zero c_size vectors.
    return: the real (non-padded) length of each of the a_size sequences.
    '''
    abs_sequences = tf.abs(sequences)

    # padded time steps are all zeros, so their elementwise max is 0
    abs_max_seq = tf.reduce_max(abs_sequences,reduction_indices = 2)

    max_seq_sign = tf.sign(abs_max_seq)

    # summing the 0/1 flags over time gives the real length
    real_len = tf.reduce_sum(max_seq_sign,reduction_indices = 1)

    return tf.cast(real_len,tf.int32)
Project: incremental-sequence-learning    Author: edwin-de-jong
def get_class_loss( self, args, z_classvars, z_classpred, targetdata_classvars ):
    self.mask = tf.sign( tf.abs( tf.reduce_max( targetdata_classvars, reduction_indices = 1 ) ) )

    self.result4 = tf.zeros( 1, dtype = tf.float32, name = None )
    if args.nrClassOutputVars > 0 and args.classweightfactor > 0:
      self.crossentropy = tf.nn.softmax_cross_entropy_with_logits( z_classvars, targetdata_classvars )
      self.result4 = args.classweightfactor *  self.crossentropy 
      self.result4 = tf.multiply( self.mask, self.result4 )
      self.targetdata_classvars = targetdata_classvars

    self.result = self.result4

    self.result_before_mask = self.result
    self.result *= self.mask #checked EdJ Sept 2: correctly only measures loss up to last point of actual sequence.
    self.lossvector = self.result

    self.lossnrpoints = tf.reduce_sum( self.mask )

    classloss = tf.reduce_sum( self.result  ) / self.lossnrpoints
    return classloss
Project: ternarynet    Author: czhu95
def p_ternarize(x, p):

    x = tf.tanh(x)
    shape = x.get_shape()

    thre = tf.get_variable('T', trainable=False, collections=[tf.GraphKeys.VARIABLES, 'thresholds'],
            initializer=0.05)
    flat_x = tf.reshape(x, [-1])
    k = int(flat_x.get_shape().dims[0].value * (1 - p))
    topK, _ = tf.nn.top_k(tf.abs(flat_x), k)
    update_thre = thre.assign(topK[-1])
    tf.add_to_collection('update_thre_op', update_thre)

    mask = tf.zeros(shape)
    mask = tf.select((x > thre) | (x < -thre), tf.ones(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask)

    tf.histogram_summary(w.name, w)
    return w
Project: ternarynet    Author: czhu95
def tw_ternarize(x, thre):

    shape = x.get_shape()

    thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thre)

    w_p = tf.get_variable('Wp', collections=[tf.GraphKeys.VARIABLES, 'positives'], initializer=1.0)
    w_n = tf.get_variable('Wn', collections=[tf.GraphKeys.VARIABLES, 'negatives'], initializer=1.0)

    tf.scalar_summary(w_p.name, w_p)
    tf.scalar_summary(w_n.name, w_n)

    mask = tf.ones(shape)
    mask_p = tf.select(x > thre_x, tf.ones(shape) * w_p, mask)
    mask_np = tf.select(x < -thre_x, tf.ones(shape) * w_n, mask_p)
    mask_z = tf.select((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask_z)

    w = w * mask_np

    tf.histogram_summary(w.name, w)
    return w
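The gradient_override_map pattern above is a straight-through estimator: tf.sign has zero gradient almost everywhere, so backpropagation is rerouted through Identity. A minimal standalone sketch of the same trick (assuming the TF 1.x graph API):

import tensorflow as tf

g = tf.get_default_graph()
x = tf.Variable([0.7, -1.3, 0.2])
with g.gradient_override_map({"Sign": "Identity"}):
    w = tf.sign(x)                             # forward pass: -1/0/+1
grad, = tf.gradients(tf.reduce_sum(w), x)      # backward pass: all ones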
Project: InnerOuterRNN    Author: Chemoinformatics
def add_training_ops(self):
        def apply_gradient_clipping(gradient):
            if gradient is not None:
                return tf.mul(tf.clip_by_value(tf.abs(gradient), 0.1, 1.),
                              tf.sign(gradient))
            else:
                return None

        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           beta1=0.9, beta2=0.999,
                                           epsilon=1e-08,
                                           use_locking=False, name='Adam')

        loss_op = self.loss_op + config.weight_decay_factor * tf.add_n(
            [tf.nn.l2_loss(v) for v in tf.get_collection('weights_decay')])

        gvs = optimizer.compute_gradients(loss_op)

        if self._clip_gradients:
            gvs = [(apply_gradient_clipping(grad), var) for grad, var in gvs]

        train_op = optimizer.apply_gradients(gvs)

        return train_op
Project: BGAN    Author: htconquer
def loss(x64, x_tilde, z_x_log_sigma_sq1, z_x_meanx1, d_x, d_x_p, l_x, l_x_tilde,ss_ ):

    SSE_loss = tf.reduce_mean(tf.square(x64 - x_tilde))


    pair_loss=tf.reduce_mean(tf.square(tf.matmul(z_x_meanx1, tf.transpose(z_x_meanx1))- ss_)) +\
              tf.reduce_mean(tf.square(z_x_meanx1 - tf.sign(z_x_meanx1)))

    KL_loss = tf.reduce_sum(-0.5 * tf.reduce_sum(1 + tf.clip_by_value(z_x_log_sigma_sq1, -10.0, 10.0)
                                                 - tf.square(tf.clip_by_value(z_x_meanx1, -10.0, 10.0))
                                                 - tf.exp(tf.clip_by_value(z_x_log_sigma_sq1, -10.0, 10.0)),
                                                 1)) / 64/64/3

    D_loss = tf.reduce_mean(-1. * (tf.log(tf.clip_by_value(d_x, 1e-5, 1.0)) +
                                   tf.log(tf.clip_by_value(1.0 - d_x_p, 1e-5, 1.0))))
    G_loss = tf.reduce_mean(-1. * (tf.log(tf.clip_by_value(d_x_p, 1e-5, 1.0))))
    LL_loss = tf.reduce_sum(tf.square(l_x - l_x_tilde)) / 64/64./3.
    return SSE_loss, KL_loss, D_loss, G_loss, LL_loss,pair_loss
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def sample_bernoulli(probs):
    return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))
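This one-liner works because probs - uniform is positive with probability probs; tf.sign maps that to +1/-1 and tf.nn.relu zeroes out the -1s. A tiny illustrative run:

import tensorflow as tf

probs = tf.constant([0.1, 0.5, 0.9])
samples = sample_bernoulli(probs)  # each entry is 1 with its given probability
with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [0. 1. 1.]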
Project: youtube-8m    Author: wangheda
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type=="class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq,dtype=tf.int32),FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels,tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="frequent":
        float_classes = float_labels[:,0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
          float_classes = tf.nn.xw_plus_b(float_classes,weight_i,bias_i)
          if i<FLAGS.encoder_layers-1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            #float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class,float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers-1):
          float_classes = tf.concat((float_classes,float_labels),axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
Project: youtube-8m    Author: wangheda
def sample_prob(self,probs):
        return tf.nn.relu(tf.sign(probs - tf.random_uniform(probs.get_shape())))
Project: TensorFlow-World    Author: astorfi
def inference_fn(W,b,x_data,y_target):
    prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    return accuracy
Project: vae-npvc    Author: JeremyCCHsu
def mu_law_decode_nonlinear(output, quantization_channels=256):
    '''
    Uncompress the waveform amplitudes using the inverse mu-law non-linearity.
    NOTE: this applies only the non-linear expansion step.
    '''
    with tf.name_scope('decode'):
        mu = quantization_channels - 1
        # Map values back to [-1, 1].
        # signal = 2 * (tf.to_float(output) / mu) - 1
        signal = output
        # Perform inverse of mu-law transformation.
        magnitude = (1 / mu) * ((1 + mu)**abs(signal) - 1)
        return tf.sign(signal) * magnitude
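Paired with mu_law_encode_nonlinear from the top of this listing, the decoder gives an approximate round trip (up to float error); the sample values are illustrative:

import tensorflow as tf

audio = tf.constant([-0.5, 0.0, 0.25, 1.0])
roundtrip = mu_law_decode_nonlinear(mu_law_encode_nonlinear(audio))
with tf.Session() as sess:
    print(sess.run(roundtrip))  # approximately [-0.5  0.  0.25  1.]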
Project: recurrent-entity-networks    Author: jimfleming
def get_sequence_length(sequence, scope=None):
    "Determine the length of a sequence that has been padded with zeros."
    with tf.variable_scope(scope, 'SequenceLength'):
        used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=[-1]))
        length = tf.cast(tf.reduce_sum(used, reduction_indices=[-1]), tf.int32)
        return length
Project: identifiera-sarkasm    Author: risnejunior
def calc_seqlenth(input):
    # this code is copied from TFLearn's retrieve sequence-length method. Credited to its creator @aymericdamien
    with tf.name_scope('GetLength'):
        used = tf.sign(tf.reduce_max(tf.abs(input), reduction_indices=2))
        length = tf.reduce_sum(used, reduction_indices=1)
        length = tf.cast(length, tf.int32)
    return length
# This code is copied from TFLearn's advanced_indexing_op() method. Credited to its creator @aymericdamien
Project: onsager_deep_learning    Author: mborgerding
def simple_soft_threshold(r_, lam_):
    "implement a soft threshold function y=sign(r)*max(0,abs(r)-lam)"
    lam_ = tf.maximum(lam_, 0)
    return tf.sign(r_) * tf.maximum(tf.abs(r_) - lam_, 0)
Project: onsager_deep_learning    Author: mborgerding
def shrink_piecwise_linear(r,rvar,theta):
    """Implement the piecewise linear shrinkage function.
        With minor modifications and variance normalization.
        theta[...,0] : abscissa of first vertex, scaled by sqrt(rvar)
        theta[...,1] : abscissa of second vertex, scaled by sqrt(rvar)
        theta[...,2] : slope from origin to first vertex
        theta[...,3] : slope from first vertex to second vertex
        theta[...,4] : slope after second vertex
    """
    ab0 = theta[...,0]
    ab1 = theta[...,1]
    sl0 = theta[...,2]
    sl1 = theta[...,3]
    sl2 = theta[...,4]

    # scale each column by sqrt(rvar)
    scale_out = tf.sqrt(rvar)
    scale_in = 1/scale_out
    rs = tf.sign(r*scale_in)
    ra = tf.abs(r*scale_in)

    # split the piecewise linear function into regions
    rgn0 = tf.to_float( ra<ab0)
    rgn1 = tf.to_float( ra<ab1) - rgn0
    rgn2 = tf.to_float( ra>=ab1)
    xhat = scale_out * rs*(
            rgn0*sl0*ra +
            rgn1*(sl1*(ra - ab0) + sl0*ab0 ) +
            rgn2*(sl2*(ra - ab1) +  sl0*ab0 + sl1*(ab1-ab0) )
            )
    dxdr =  sl0*rgn0 + sl1*rgn1 + sl2*rgn2
    dxdr = tf.reduce_mean(dxdr,0)
    return (xhat,dxdr)
Project: cleverhans    Author: tensorflow
def main(_):
  eps = FLAGS.max_epsilon / 255.0
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]

  with tf.Graph().as_default():
    x_input = tf.placeholder(tf.float32, shape=batch_shape)
    noisy_images = x_input + eps * tf.sign(tf.random_normal(batch_shape))
    x_output = tf.clip_by_value(noisy_images, 0.0, 1.0)

    with tf.Session(FLAGS.master) as sess:
      for filenames, images in load_images(FLAGS.input_dir, batch_shape):
        out_images = sess.run(x_output, feed_dict={x_input: images})
        save_images(out_images, filenames, FLAGS.output_dir)
Project: rbm-ae-tf    Author: Cospel
def sample_prob(self, probs):
        return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))