Python tensorflow module: count_nonzero() code examples

The following 9 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.count_nonzero().
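A minimal usage sketch (made-up values; TensorFlow 1.x API, as in the snippets below; TensorFlow 2.x exposes the same op as tf.math.count_nonzero). It counts the entries that are not zero, over the whole tensor or along an axis, and returns int64 by default:

import tensorflow as tf

x = tf.constant([[0, 1, 0],
                 [2, 0, 3]])
total = tf.count_nonzero(x)            # scalar: 3 non-zero entries, dtype int64
per_row = tf.count_nonzero(x, axis=1)  # [1, 2]

with tf.Session() as sess:
    print(sess.run([total, per_row]))  # [3, array([1, 2])]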

Project: tefla    Author: openAGI    | project source | file source
def _grad_sparsity(self):
        """Gradient sparsity."""
        # If the sparse minibatch gradient has 10 percent of its entries
        # non-zero, its sparsity is 0.1.
        # The norm of the dense gradient averaged over the full dataset
        # is roughly the sparse minibatch gradient norm * sqrt(sparsity).
        # A possible extension: only correct the sparse blob.
        non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
        all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
        self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
        self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
        avg_op = self._moving_averager.apply([self._sparsity, ])
        with tf.control_dependencies([avg_op]):
            self._sparsity_avg = self._moving_averager.average(self._sparsity)
        return avg_op
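
The sqrt(sparsity) heuristic in the comments above can be sanity-checked numerically, under the purely illustrative assumption that the sparse minibatch gradient is an unbiased masked estimate of the dense full-dataset gradient:

import numpy as np

rng = np.random.default_rng(0)
n, s = 100_000, 0.1                 # assumed sizes, illustration only
dense = rng.standard_normal(n)      # stand-in for the full-dataset gradient
mask = rng.random(n) < s            # a minibatch touches ~10% of the entries
sparse = dense * mask / s           # unbiased sparse estimate of `dense`

print(np.linalg.norm(dense))                # ~316
print(np.linalg.norm(sparse) * np.sqrt(s))  # roughly the same magnitude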
Project: tensor2tensor    Author: tensorflow    | project source | file source
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset
    # is roughly the sparse minibatch gradient norm * sqrt(sparsity).
    # A possible extension: only correct the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op
Project: cnn_lstm_ctc_ocr    Author: weinman    | project source | file source
def _get_testing(rnn_logits,sequence_length,label,label_length):
    """Create ops for testing (all scalars): 
       loss: CTC loss function value, 
       label_error:  Batch-normalized edit distance on beam search max
       sequence_error: Batch-normalized sequence error rate
    """
    with tf.name_scope("train"):
        loss = model.ctc_loss_layer(rnn_logits,label,sequence_length) 
    with tf.name_scope("test"):
        predictions,_ = tf.nn.ctc_beam_search_decoder(rnn_logits, 
                                                   sequence_length,
                                                   beam_width=128,
                                                   top_paths=1,
                                                   merge_repeated=True)
        hypothesis = tf.cast(predictions[0], tf.int32) # for edit_distance
        label_errors = tf.edit_distance(hypothesis, label, normalize=False)
        sequence_errors = tf.count_nonzero(label_errors,axis=0)
        total_label_error = tf.reduce_sum( label_errors )
        total_labels = tf.reduce_sum( label_length )
        label_error = tf.truediv( total_label_error, 
                                  tf.cast(total_labels, tf.float32 ),
                                  name='label_error')
        sequence_error = tf.truediv( tf.cast( sequence_errors, tf.int32 ),
                                     tf.shape(label_length)[0],
                                     name='sequence_error')
        tf.summary.scalar( 'loss', loss )
        tf.summary.scalar( 'label_error', label_error )
        tf.summary.scalar( 'sequence_error', sequence_error )

    return loss, label_error, sequence_error
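
A small illustration (made-up edit distances) of the counting step above: applying tf.count_nonzero to the per-sequence edit distances counts how many sequences in the batch were decoded with at least one error:

import tensorflow as tf

label_errors = tf.constant([0.0, 2.0, 0.0, 1.0])  # hypothetical edit distances
sequence_errors = tf.count_nonzero(label_errors)  # 2 sequences had errors

with tf.Session() as sess:
    print(sess.run(sequence_errors))  # 2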
Project: YellowFin    Author: JianGoForIt    | project source | file source
def grad_sparsity(self):
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset
    # is roughly the sparse minibatch gradient norm * sqrt(sparsity).
    # A possible extension: only correct the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grads])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grads])
    self._sparsity = tf.cast(non_zero_cnt, self._grads[0].dtype) \
      / tf.cast(all_entry_cnt, self._grads[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity, ])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op
Project: tensorforce    Author: reinforceio    | project source | file source
def tf_apply(self, x, update):
        if self.name == 'elu':
            x = tf.nn.elu(features=x)

        elif self.name == 'none':
            x = tf.identity(input=x)

        elif self.name == 'relu':
            x = tf.nn.relu(features=x)
            if 'relu' in self.summary_labels:
                non_zero = tf.cast(x=tf.count_nonzero(input_tensor=x), dtype=tf.float32)
                size = tf.cast(x=tf.reduce_prod(input_tensor=tf.shape(input=x)), dtype=tf.float32)
                summary = tf.summary.scalar(name='relu', tensor=(non_zero / size))
                self.summaries.append(summary)

        elif self.name == 'selu':
            # https://arxiv.org/pdf/1706.02515.pdf
            alpha = 1.6732632423543772848170429916717
            scale = 1.0507009873554804934193349852946
            negative = alpha * tf.nn.elu(features=x)
            x = scale * tf.where(condition=(x >= 0.0), x=x, y=negative)

        elif self.name == 'sigmoid':
            x = tf.sigmoid(x=x)

        elif self.name == 'softmax':
            x = tf.nn.softmax(logits=x)

        elif self.name == 'softplus':
            x = tf.nn.softplus(features=x)

        elif self.name == 'tanh':
            x = tf.nn.tanh(x=x)

        else:
            raise TensorForceError('Invalid non-linearity: {}'.format(self.name))

        return x
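
The scalar logged for 'relu' above is just the fraction of active units; a standalone sketch with made-up activations (tf.size is used here as a stand-in for reduce_prod over the shape):

import tensorflow as tf

x = tf.constant([[-1.0, 0.5], [2.0, -3.0]])
relu = tf.nn.relu(x)                                    # [[0, 0.5], [2, 0]]
non_zero = tf.cast(tf.count_nonzero(relu), tf.float32)  # 2 active units
size = tf.cast(tf.size(relu), tf.float32)               # 4 units total

with tf.Session() as sess:
    print(sess.run(non_zero / size))  # 0.5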
Project: Defect-Prediction    Author: Jorba123    | project source | file source
def f1_score(logits, targets_pl, one_hot=False):
    targets = tf.to_int64(targets_pl)

    y_predicted = tf.argmax(logits, 1)
    if one_hot:
        y_true = tf.argmax(targets, 1)
    else:
        y_true = targets  # bug fix: compare predictions against the labels, not the logits

    # get true positives (by multiplying the predicted and actual labels we will only get a 1 if both labels are 1)
    tp = tf.count_nonzero(y_predicted * y_true)

    # get true negatives (basically the same as tp only the inverse)
    tn = tf.count_nonzero((y_predicted - 1) * (y_true - 1)) 

    fp = tf.count_nonzero(y_predicted * (y_true - 1))
    fn = tf.count_nonzero((y_predicted - 1) * y_true)

    # Calculate accuracy, precision, recall and F1 score.
    accuracy = (tp + tn) / (tp + fp + fn + tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = (2 * precision * recall) / (precision + recall)

    tf.summary.scalar('accuracy', accuracy)
    tf.summary.scalar('precision', precision)
    tf.summary.scalar('recall', recall)
    tf.summary.scalar('f1-score', f1_score)

    f1_score = tf.reduce_mean(tf.cast(f1_score, 'float32'), name='f1_score_reduce_mean')
    return f1_score
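
A quick check of the confusion-matrix trick used above, assuming binary predictions and labels in {0, 1} (hypothetical vectors): the four products isolate TP, TN, FP, and FN:

import tensorflow as tf

y_pred = tf.constant([1, 0, 1, 1, 0], dtype=tf.int64)
y_true = tf.constant([1, 0, 0, 1, 1], dtype=tf.int64)

tp = tf.count_nonzero(y_pred * y_true)              # both 1         -> 2
tn = tf.count_nonzero((y_pred - 1) * (y_true - 1))  # both 0         -> 1
fp = tf.count_nonzero(y_pred * (y_true - 1))        # pred 1, true 0 -> 1
fn = tf.count_nonzero((y_pred - 1) * y_true)        # pred 0, true 1 -> 1

with tf.Session() as sess:
    print(sess.run([tp, tn, fp, fn]))  # [2, 1, 1, 1]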
Project: tbcnn    Author: crestonbunch    | project source | file source
def eta_r(children, t_coef):
    """Compute weight matrix for how much each vector belogs to the 'right'"""
    with tf.name_scope('coef_r'):
        # children is shape (batch_size x max_tree_size x max_children)
        children = tf.cast(children, tf.float32)
        batch_size = tf.shape(children)[0]
        max_tree_size = tf.shape(children)[1]
        max_children = tf.shape(children)[2]

        # num_siblings is shape (batch_size x max_tree_size x 1)
        num_siblings = tf.cast(
            tf.count_nonzero(children, axis=2, keep_dims=True),
            dtype=tf.float32
        )
        # num_siblings is shape (batch_size x max_tree_size x max_children + 1)
        num_siblings = tf.tile(
            num_siblings, [1, 1, max_children + 1], name='num_siblings'
        )
        # creates a mask of 1's and 0's where 1 means there is a child there
        # has shape (batch_size x max_tree_size x max_children + 1)
        mask = tf.concat(
            [tf.zeros((batch_size, max_tree_size, 1)),
             tf.minimum(children, tf.ones(tf.shape(children)))],
            axis=2, name='mask'
        )

        # child indices for every tree (batch_size x max_tree_size x max_children + 1)
        child_indices = tf.multiply(tf.tile(
            tf.expand_dims(
                tf.expand_dims(
                    tf.range(-1.0, tf.cast(max_children, tf.float32), 1.0, dtype=tf.float32),
                    axis=0
                ),
                axis=0
            ),
            [batch_size, max_tree_size, 1]
        ), mask, name='child_indices')

        # weights for every tree node in the case that num_siblings = 0
        # shape is (batch_size x max_tree_size x max_children + 1)
        singles = tf.concat(
            [tf.zeros((batch_size, max_tree_size, 1)),
             tf.fill((batch_size, max_tree_size, 1), 0.5),
             tf.zeros((batch_size, max_tree_size, max_children - 1))],
            axis=2, name='singles')

        # eta_r is shape (batch_size x max_tree_size x max_children + 1)
        return tf.where(
            tf.equal(num_siblings, 1.0),
            # avoid division by 0 when num_siblings == 1
            singles,
            # the normal case where num_siblings != 1
            tf.multiply((1.0 - t_coef), tf.divide(child_indices, num_siblings - 1.0)),
            name='coef_r'
        )
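
The num_siblings step above reduces along the children axis while keeping it; a tiny illustration with toy child indices (TF 1.x keep_dims argument, matching the snippet; 0 marks the absence of a child):

import tensorflow as tf

# children: (batch=1, max_tree_size=2, max_children=3)
children = tf.constant([[[4.0, 5.0, 0.0],
                         [7.0, 0.0, 0.0]]])
num_siblings = tf.count_nonzero(children, axis=2, keep_dims=True)  # shape (1, 2, 1)

with tf.Session() as sess:
    print(sess.run(num_siblings))  # [[[2], [1]]]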
Project: neural-combinatorial-optimization-rl-tensorflow    Author: MichelDeudon    | project source | file source
def build_reward(self):

        with tf.name_scope('permutations'):

            # Reorder input % tour
            self.permutations = tf.stack([tf.tile(tf.expand_dims(tf.range(self.batch_size,dtype=tf.int32),1),[1,self.max_length+2]),self.positions],2)
            self.ordered_input_ = tf.gather_nd(self.input_,self.permutations)
            self.ordered_input_ = tf.transpose(self.ordered_input_,[2,1,0]) # [batch size, seq length +1 , features] to [features, seq length +1, batch_size]   Rq: +1 because end = start = depot

            # Ordered coordinates
            ordered_x_ = self.ordered_input_[0] # [seq length +1, batch_size]
            delta_x2 = tf.transpose(tf.square(ordered_x_[1:]-ordered_x_[:-1]),[1,0]) # [batch_size, seq length]        delta_x**2
            ordered_y_ = self.ordered_input_[1] # [seq length +1, batch_size]
            delta_y2 = tf.transpose(tf.square(ordered_y_[1:]-ordered_y_[:-1]),[1,0]) # [batch_size, seq length]        delta_y**2

            # Ordered TW constraints
            self.ordered_tw_mean_ = tf.transpose(self.ordered_input_[2][:-1],[1,0]) # [seq length, batch_size] to [batch_size, seq length]
            self.ordered_tw_width_ = tf.transpose(self.ordered_input_[3][:-1],[1,0]) # [seq length, batch_size] to [batch_size, seq length]

            self.ordered_tw_open_ = self.ordered_tw_mean_ - self.ordered_tw_width_/2
            self.ordered_tw_close_ = self.ordered_tw_mean_ + self.ordered_tw_width_/2

        with tf.name_scope('environment'):

            # Get tour length (euclidean distance)
            inter_city_distances = tf.sqrt(delta_x2+delta_y2) # sqrt(delta_x**2 + delta_y**2) this is the euclidean distance between each city: depot --> ... ---> depot      [batch_size, seq length]
            self.distances = tf.reduce_sum(inter_city_distances, axis=1) # [batch_size]
            variable_summaries('tour_length',self.distances, with_max_min = True)

            # Get time at each city if no constraint
            self.time_at_cities = (1/self.speed)*tf.cumsum(inter_city_distances, axis=1, exclusive=True)-10 # [batch size, seq length]          # Rq: -10 to be on time at depot (t_mean centered)

            # Apply constraints to each city
            self.constrained_delivery_time = []
            cumul_lateness = 0
            for time_open, delivery_time in zip(tf.unstack(self.ordered_tw_open_,axis=1), tf.unstack(self.time_at_cities,axis=1)):  # Unstack % seq length
                delayed_delivery = delivery_time + cumul_lateness
                cumul_lateness += tf.maximum(time_open-delayed_delivery,tf.zeros([self.batch_size])) # if you have to wait... wait (impacts further states)
                self.constrained_delivery_time.append(delivery_time+cumul_lateness)
            self.constrained_delivery_time = tf.stack(self.constrained_delivery_time,1)

            # Define delay from lateness
            self.delay = tf.maximum(self.constrained_delivery_time-self.ordered_tw_close_-0.0001, tf.zeros([self.batch_size,self.max_length+1])) # delay perceived by the client (who doesn't care whether the deliverer waits)
            self.delay = tf.count_nonzero(self.delay,1)
            variable_summaries('delay',tf.cast(self.delay,tf.float32), with_max_min = True)

            # Define reward from tour length & delay
            self.reward = tf.cast(self.distances,tf.float32)+self.beta*tf.sqrt(tf.cast(self.delay,tf.float32))
            variable_summaries('reward',self.reward, with_max_min = True)
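
Note that self.delay above ends up as a count, not a duration: tf.count_nonzero over the per-city lateness counts how many cities in each tour were served late. A made-up example:

import tensorflow as tf

lateness = tf.constant([[0.0, 0.3, 0.0, 1.2],
                        [0.0, 0.0, 0.0, 0.0]])  # (batch=2, seq length=4)
late_cities = tf.count_nonzero(lateness, 1)     # late cities per tour

with tf.Session() as sess:
    print(sess.run(late_cities))  # [2 0]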
Project: ADEM    Author: Yoctol    | project source | file source
def lstm_context_encoder(
        input_with_embedding, mask,
        utterence_level_state_size,
        utterence_level_keep_proba,
        utterence_level_num_layers,
        context_level_state_size,
        context_level_keep_proba,
        context_level_num_layers,
        utterence_level_forget_bias=1.0,
        utterence_level_activation=tf.tanh,
        context_level_forget_bias=1.0,
        context_level_activation=tf.tanh,
        scope_name=None, **kwargs):

    if scope_name is None:
        scope_name = 'lstm_context_encoder'

    with tf.variable_scope(scope_name, reuse=None):
        # a context = several utterances (one or more)
        # two level encoder
        input_shape = tf.shape(input_with_embedding)

        # utterence level encoder
        utterence_outputs, utterence_states = multi_lstms(
            input_with_embedding=input_with_embedding,
            mask=mask,
            forget_bias=utterence_level_forget_bias,
            activation=utterence_level_activation,
            batch_size=input_shape[0],
            state_size=utterence_level_state_size,
            keep_prob=utterence_level_keep_proba,
            num_layers=utterence_level_num_layers,
            scope_name='utterence_level_lstm',
            init_state=None)

        final_utterence_output = get_last_effective_result(utterence_outputs, mask)

        # context level encoder
        utt_output_shape = tf.shape(final_utterence_output)
        context_input = tf.reshape(
            final_utterence_output,
            shape=tf.concat([[1], utt_output_shape], axis=0))

        context_mask = tf.count_nonzero([mask], axis=1)

        context_outputs, context_states = multi_lstms(
            input_with_embedding=context_input,
            mask=context_mask,
            forget_bias=context_level_forget_bias,
            activation=context_level_activation,
            batch_size=1,
            state_size=context_level_state_size,
            keep_prob=context_level_keep_proba,
            num_layers=context_level_num_layers,
            scope_name='context_level_lstm',
            init_state=None)

        final_context_output = get_last_effective_result(
            context_outputs, context_mask)

    return final_context_output