Python tensorflow module: reduce_min() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.reduce_min().

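Before the project snippets, a minimal standalone sketch of the call itself (assuming a TensorFlow 1.x graph session; keep_dims is the 1.x spelling of the later keepdims argument):

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 2.0],
                 [6.0, 5.0, 4.0]])
global_min = tf.reduce_min(x)                            # scalar: 1.0
row_min = tf.reduce_min(x, axis=1)                       # per-row minima: [1.0, 4.0]
row_min_kept = tf.reduce_min(x, axis=1, keep_dims=True)  # shape (2, 1)

with tf.Session() as sess:
    print(sess.run([global_min, row_min, row_min_kept]))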
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | Project source | File source
def log_variable(variable, gradient=None):
    r'''
    We introduce a function for logging a tensor variable's current state.
    It logs scalar values for the mean, standard deviation, minimum and maximum.
    Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
    '''
    name = variable.name
    mean = tf.reduce_mean(variable)
    tf.summary.scalar(name='%s/mean'   % name, tensor=mean)
    tf.summary.scalar(name='%s/stddev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
    tf.summary.scalar(name='%s/max'    % name, tensor=tf.reduce_max(variable))
    tf.summary.scalar(name='%s/min'    % name, tensor=tf.reduce_min(variable))
    tf.summary.histogram(name=name, values=variable)
    if gradient is not None:
        if isinstance(gradient, tf.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            tf.summary.histogram(name='%s/gradients' % name, values=grad_values)
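A hypothetical usage sketch for log_variable (the variable, loss, and gradient below are illustrative, not from the project):

W = tf.Variable(tf.truncated_normal([128, 64], stddev=0.1), name='W')
loss = tf.reduce_sum(tf.square(W))
grad = tf.gradients(loss, [W])[0]
log_variable(W, gradient=grad)     # registers the scalar and histogram summaries
merged = tf.summary.merge_all()    # fetch this in sess.run() to write the events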
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def _activation_summary(self, x, layer_name):
    """Helper to create summaries for activations.

    Args:
      x: layer output tensor
      layer_name: name of the layer
    Returns:
      nothing
    """
    with tf.variable_scope('activation_summary') as scope:
      tf.summary.histogram(
          'activation_summary/'+layer_name, x)
      tf.summary.scalar(
          'activation_summary/'+layer_name+'/sparsity', tf.nn.zero_fraction(x))
      tf.summary.scalar(
          'activation_summary/'+layer_name+'/average', tf.reduce_mean(x))
      tf.summary.scalar(
          'activation_summary/'+layer_name+'/max', tf.reduce_max(x))
      tf.summary.scalar(
          'activation_summary/'+layer_name+'/min', tf.reduce_min(x))
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      vocab_size = predictions.get_shape().as_list()[1]
      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      neg_labels = 1 - float_labels
      predictions_pos = predictions*float_labels+10*neg_labels
      predictions_minpos = tf.reduce_min(predictions_pos,axis=1,keep_dims=True)
      predictions_neg = predictions*neg_labels-10*float_labels
      predictions_maxneg = tf.reduce_max(predictions_neg,axis=1,keep_dims=True)
      mask_1 = tf.cast(tf.greater_equal(predictions_neg, predictions_minpos),dtype=tf.float32)
      mask_2 = tf.cast(tf.less_equal(predictions_pos, predictions_maxneg),dtype=tf.float32)
      cross_entropy_loss = cross_entropy_loss*(mask_1+mask_2)*10 + cross_entropy_loss
      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
Project: lung-cancer-detector    Author: YichenGong    | Project source | File source
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """

    V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    V -= tf.reduce_min(V)
    V /= tf.reduce_max(V)
    V *= 255

    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    V = tf.reshape(V, tf.stack((img_w, img_h, 1)))
    V = tf.transpose(V, (2, 0, 1))
    V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))
    return V
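A hypothetical way to surface the result in TensorBoard (the placeholder shape is illustrative):

feature_map = tf.placeholder(tf.float32, shape=[1, 64, 64, 8])
tf.summary.image('features', get_image_summary(feature_map, idx=0))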
Project: answer-triggering    Author: jiez-osu    | Project source | File source
def bag_hinge_loss(config, preds, sent_mask, flip_sent_mask, hete_mask,
                   sent_trgt, sent_num):
  """ HINGE LOSS:
      DEFINED AS: MAX(0, M - MIN(SENT+) + MAX(SENT-))
      THIS ONLY APPLIES TO HETE BAGS.
  """
  flip_sent_trgt = \
      tf.constant(1, shape=[config.batch_size,sent_num], dtype=config.data_type) - \
      sent_trgt
  pos_preds = preds + flip_sent_trgt + flip_sent_mask # [batch_size, sent_num]
  neg_preds = preds * flip_sent_trgt * sent_mask # [batch_size, sent_num]
  min_pos_pred = tf.reduce_min(pos_preds, 1)
  # min_pos_pred = tf.Print(min_pos_pred, [min_pos_pred], message='min_pos_pred')
  max_neg_pred = tf.reduce_max(neg_preds, 1)
  # max_neg_pred = tf.Print(max_neg_pred, [max_neg_pred], message='max_neg_pred')

  hinge_loss = hete_mask * tf.reduce_max(tf.stack(  # tf.pack was renamed tf.stack in TF 1.0
      [tf.constant(0, shape=[config.batch_size], dtype=config.data_type),
       (0.20 - min_pos_pred + max_neg_pred)], axis=1), 1) # [batch_size]
  # hinge_loss = tf.Print(hinge_loss, [hinge_loss], message='hinge_loss', summarize=20)

  avg_hinge_loss = tf.reduce_sum(hinge_loss) / (tf.reduce_sum(hete_mask) + 1e-12)
  return avg_hinge_loss
Project: luminoth    Author: tryolabs    | Project source | File source
def variable_summaries(var, name, collections=None):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor for variable from which we want to log.
        - name: Variable name.
        - collections: List of collections to save the summary to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
Project: py-noisemaker    Author: aayars    | Project source | File source
def _conform_kernel_to_tensor(kernel, tensor, shape):
    """ Re-shape a convolution kernel to match the given tensor's color dimensions. """

    l = len(kernel)

    channels = shape[-1]

    temp = np.repeat(kernel, channels)

    temp = tf.reshape(temp, (l, l, channels, 1))

    temp = tf.cast(temp, tf.float32)

    temp /= tf.maximum(tf.reduce_max(temp), tf.reduce_min(temp) * -1)

    return temp
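A hypothetical call, adapting a 3x3 edge kernel to a 3-channel shape (kernel values are illustrative; note that the tensor argument is unused in the body shown above, so None is passed):

import numpy as np

kernel = np.array([[1., 0., -1.],
                   [2., 0., -2.],
                   [1., 0., -1.]])
depthwise_kernel = _conform_kernel_to_tensor(kernel, None, shape=[64, 64, 3])
# result: a (3, 3, 3, 1) float32 kernel, scaled so its largest magnitude is 1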
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with minimum values of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
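A hypothetical usage, assuming this function is exposed as the Keras backend's K.min as in the upstream backend:

from keras import backend as K

x = K.variable([[3.0, 1.0],
                [2.0, 4.0]])
print(K.eval(K.min(x)))           # 1.0 -- global minimum
print(K.eval(K.min(x, axis=1)))   # [1.0, 2.0] -- per-row minima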
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def seg_prediction(self):
        outputs, size, batch_size = self.outputs
        num_class = self.config.num_class
        output_w = weight_variable([size, num_class])
        output_b = bias_variable([num_class])
        # outputs = tf.transpose(outputs,[1,0,2])
        tag_trans = weight_variable([num_class, num_class])

        def transition(p, x):
            res = tf.matmul(x, output_w) + output_b
            # deviation = tf.tile(tf.expand_dims(tf.reduce_min(previous_pred, reduction_indices=1), 1),
            #                    [1, num_class])

            # previous_pred -= deviation
            focus = 1.
            res += tf.matmul(p, tag_trans) * focus

            prediction = tf.nn.softmax(res)
            return prediction

        # Recurrent network.
        pred = tf.scan(transition, outputs, initializer=tf.zeros([batch_size, num_class]), parallel_iterations=100)
        pred = tf.transpose(pred, [1, 0, 2])
        return pred
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def pos_prediction(self):
        outputs, size, batch_size = self.outputs
        num_class = len(POS_tagging['P'])

        output_w = weight_variable([size, num_class])
        output_b = bias_variable([num_class])
        # outputs = tf.transpose(outputs,[1,0,2])
        tag_trans = weight_variable([num_class, num_class])
        outputs = tf.reverse(outputs, [True, False, False])
        def transition(previous_pred, x):
            res = tf.matmul(x, output_w) + output_b
            deviation = tf.tile(tf.expand_dims(tf.reduce_min(previous_pred, reduction_indices=1), 1),
                                [1, num_class])

            previous_pred -= deviation
            focus = 0.5
            res += tf.matmul(previous_pred, tag_trans) * focus
            prediction = tf.nn.softmax(res)
            return prediction
        # Recurrent network.
        pred = tf.scan(transition, outputs, initializer=tf.zeros([batch_size, num_class]), parallel_iterations=100)
        pred = tf.reverse(pred, [True, False, False])
        pred = tf.transpose(pred, [1, 0, 2])
        return pred
Project: tf_unet    Author: jakeret    | Project source | File source
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """

    V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    V -= tf.reduce_min(V)
    V /= tf.reduce_max(V)
    V *= 255

    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    V = tf.reshape(V, tf.stack((img_w, img_h, 1)))
    V = tf.transpose(V, (2, 0, 1))
    V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))
    return V
Project: tefla    Author: openAGI    | Project source | File source
def summary_param(op, tensor, ndims, name, collections=None):
    """
    Add summary as per the ops mentioned

    Args:
        op: name of the summary op; e.g. 'stddev'
            available ops: ['scalar', 'histogram', 'sparsity', 'mean', 'rms', 'stddev', 'norm', 'max', 'min']
        tensor: the tensor to add summary
        ndims: dimension of the tensor
        name: name of the op
        collections: training or validation collections
    """
    return {
        'scalar': tf.summary.scalar(name, tensor, collections=collections) if ndims == 0 else tf.summary.scalar(name + '/mean', tf.reduce_mean(tensor), collections=collections),
        'histogram': tf.summary.histogram(name, tensor, collections=collections) if ndims >= 2 else None,
        'sparsity': tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(tensor), collections=collections),
        'mean': tf.summary.scalar(name + '/mean', tf.reduce_mean(tensor), collections=collections),
        'rms': tf.summary.scalar(name + '/rms', rms(tensor), collections=collections),
        'stddev': tf.summary.scalar(name + '/stddev', tf.sqrt(tf.reduce_sum(tf.square(tensor - tf.reduce_mean(tensor, name='mean_op'))), name='stddev_op'), collections=collections),
        'max': tf.summary.scalar(name + '/max', tf.reduce_max(tensor), collections=collections),
        'min': tf.summary.scalar(name + '/min', tf.reduce_min(tensor), collections=collections),
        'norm': tf.summary.scalar(name + '/norm', tf.sqrt(tf.reduce_sum(tensor * tensor)), collections=collections),
    }[op]
Project: 2048-RL-DRQN    Author: Mostafa-Samir    | Project source | File source
def _reduce_max(self, input_tensor, reduction_indices, c):
        """
        a constrainable version of tf.reduce_max

        Parameters:
        -----------
        input_tensor: Tensor
        reduction_indices: Tensor
        c: Tensor
            The constraints tensor
            A tensor of 0s and 1s where 1s represent the elements the reduction
            should be made on, and 0s represent discarded elements
        """

        min_values = tf.reduce_min(input_tensor, reduction_indices, keep_dims=True)
        not_c = tf.abs(c - 1)

        return tf.reduce_max(input_tensor * c + not_c * min_values, reduction_indices)
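The trick: discarded slots are overwritten with the row minimum, so they can never win the max. A standalone sketch outside the class (assuming TF 1.x):

x = tf.constant([[1.0, 9.0, 3.0]])
c = tf.constant([[1.0, 0.0, 1.0]])           # 1 = keep, 0 = discard
mins = tf.reduce_min(x, 1, keep_dims=True)   # [[1.0]]
masked_max = tf.reduce_max(x * c + (1.0 - c) * mins, 1)
with tf.Session() as sess:
    print(sess.run(masked_max))              # [3.0] -- the discarded 9.0 loses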
Project: 2048-RL-DRQN    Author: Mostafa-Samir    | Project source | File source
def _reduce_max(self, input_tensor, reduction_indices, c):
        """
        a constrainable version of tf.reduce_max

        Parameters:
        -----------
        input_tensor: Tensor
        reduction_indices: Tensor
        c: Tensor
            The constraints tensor
            A tensor of 0s and 1s where 1s represent the elements the reduction
            should be made on, and 0s represent discarded elements
        """
        with self.session.graph.as_default():
            min_values = tf.reduce_min(input_tensor, reduction_indices, keep_dims=True)
            not_c = tf.abs(c - 1)

            return tf.reduce_max(input_tensor * c + not_c * min_values, reduction_indices)
Project: 2048-RL-DRQN    Author: Mostafa-Samir    | Project source | File source
def _argmax(self, input_tensor, dimension, c):
        """
        a constrainable version of tf.argmax

        Parameters:
        -----------
        input_tensor: Tensor
        dimension: Tensor
        c: Tensor
            The constraints tensor
            A tensor of 0s and 1s where 1s represent the elements the reduction
            should be made on, and 0s represent discarded elements
        """
        with self.session.graph.as_default():
            min_values = tf.reduce_min(input_tensor, reduction_indices=[dimension,], keep_dims=True)
            not_c = tf.abs(c - 1)

            return tf.argmax(input_tensor * c + not_c * min_values, dimension)
Project: MobileNet    Author: Zehaos    | Project source | File source
def curvature_range(self):
    # set up the curvature window
    self._curv_win = \
      tf.Variable(np.zeros( [self._curv_win_width, ] ), dtype=tf.float32, name="curv_win", trainable=False)
    self._curv_win = tf.scatter_update(self._curv_win, 
      self._global_step % self._curv_win_width, self._grad_norm_squared)
    # note here the iterations start from iteration 0
    valid_window = tf.slice(self._curv_win, tf.constant( [0, ] ), 
      tf.expand_dims(tf.minimum(tf.constant(self._curv_win_width), self._global_step + 1), dim=0) )
    self._h_min_t = tf.reduce_min(valid_window)
    self._h_max_t = tf.reduce_max(valid_window)

    curv_range_ops = []
    with tf.control_dependencies([self._h_min_t, self._h_max_t] ):
      avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t] )
      with tf.control_dependencies([avg_op] ):
        self._h_min = tf.identity(self._moving_averager.average(self._h_min_t) )
        self._h_max = tf.identity(self._moving_averager.average(self._h_max_t) )
    curv_range_ops.append(avg_op)
    return curv_range_ops
Project: ADD-GAN    Author: zblasingame    | Project source | File source
def variable_summaries(var):
    """Attatch summaries of a variable to a Tensor for TensorBoard.

    Args:
        var (tf.Tensor): Tensor variable.
    """

    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
Project: GMAN    Author: iDurugkar    | Project source | File source
def add_summaries(self):
        self.min_Df = tf.reduce_min(self.Df)
        self.max_Df = tf.reduce_max(self.Df)
        self.min_Dr = tf.reduce_min(self.Dr)
        self.max_Dr = tf.reduce_max(self.Dr)
        tf.summary.scalar('D_0_z', tf.reduce_mean(self.Df[0]))
        tf.summary.scalar('min_D_z', self.min_Df)
        tf.summary.scalar('max_D_z', self.max_Df)
        tf.summary.scalar('D_0_x', tf.reduce_mean(self.Dr[0]))
        tf.summary.scalar('min_D_x', self.min_Dr)
        tf.summary.scalar('max_D_x', self.max_Dr)
        tf.summary.histogram('D_f', self.Df)
        tf.summary.histogram('D_r', self.Dr)
        for ind in range(len(self.D_losses)):
            tf.summary.scalar('D_%d_Loss' % ind, self.D_losses[ind])
        tf.summary.scalar('G_loss', self.G_loss)
        for ind in range(len(self.V_D)):
            tf.summary.scalar('V_D_%d' % ind, self.V_D[ind])
        tf.summary.scalar('V_G', self.V_G)
Project: sentiment_analysis_tensorflow    Author: rvinas    | Project source | File source
def variable_summaries(var, name):
        """
        Attach a lot of summaries to a Tensor for Tensorboard visualization.
        Ref: https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html
        :param var: Variable to summarize
        :param name: Summary name
        """
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.scalar_summary('mean/' + name, mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.scalar_summary('stddev/' + name, stddev)
            tf.scalar_summary('max/' + name, tf.reduce_max(var))
            tf.scalar_summary('min/' + name, tf.reduce_min(var))
            tf.histogram_summary(name, var)
Project: keras    Author: NVIDIA    | Project source | File source
def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with minimum values of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
Project: keras_superpixel_pooling    Author: parag2489    | Project source | File source
def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with minimum values of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
Project: nn-compression    Author: anithapk    | Project source | File source
def quantParam(): #pass saved n/w * suffix
     paramDict = {}
     minMaxDict = {}
     suffix = ["conv","_w:0"]
     with tf.Session() as sess:
        saver = tf.train.import_meta_graph('./LenetParam.meta')
        saver.restore(sess,'./LenetParam')
        conv_wts = [v.name for v in tf.trainable_variables() if (v.name.startswith(suffix[0]) & v.name.endswith(suffix[1]))]
        lay_name = [v.name for v in tf.trainable_variables() if (v.name.endswith("_w:0") | v.name.endswith("_b:0"))]
        for v in lay_name:
            curLay = [a for a in tf.trainable_variables() if (a.name==v)]
            curWt = curLay[0].eval()
            if v in conv_wts:
                quantWt = tf.quantize_v2(curWt,tf.reduce_min(curWt),tf.reduce_max(curWt),tf.qint16,
                    mode="MIN_FIRST",name="quant32to16")
                chk = sess.run(quantWt)
                paramDict.update({v:chk.output})
                minMaxDict.update({v:[chk.output_min,chk.output_max]})
            else:
                chk = curWt
                paramDict.update({v:chk})
     print(paramDict.keys())
     print(minMaxDict.keys())
     return paramDict, minMaxDict
Project: DMNN    Author: magnux    | Project source | File source
def learn_comb(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )
        norm_comb_matrix = comb_matrix / tf.reduce_sum(comb_matrix, axis=0, keep_dims=True)

        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, norm_comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(norm_comb_matrix)
        cb_max = tf.reduce_max(norm_comb_matrix)
        comb_matrix_image = (norm_comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
        return poses, comb_matrix_image
Project: DMNN    Author: magnux    | Project source | File source
def learn_comb_unc(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )

        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(comb_matrix)
        cb_max = tf.reduce_max(comb_matrix)
        comb_matrix_image = (comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
        return poses, comb_matrix_image
Project: DMNN    Author: magnux    | Project source | File source
def learn_comb_centered(poses, dm_shape, batch_size, max_length, n_dims, reuse=None, _float_type=tf.float32):
    with tf.variable_scope("learn_comb", reuse=reuse):
        comb_matrix = tf.get_variable(
            "matrix", [dm_shape[0], dm_shape[1]],
            initializer=identity_initializer(0.01),
            dtype=_float_type, trainable=True
        )

        pcenter = tf.reduce_mean(poses, axis=2, keep_dims=True)
        poses = poses - pcenter

        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size * max_length * n_dims, dm_shape[0]])
        poses = tf.matmul(poses, comb_matrix)
        poses = tf.reshape(poses, [batch_size, max_length, n_dims, dm_shape[0]])
        poses = tf.transpose(poses, [0, 1, 3, 2])
        poses = tf.reshape(poses, [batch_size, max_length, dm_shape[0], n_dims])

        cb_min = tf.reduce_min(comb_matrix)
        cb_max = tf.reduce_max(comb_matrix)
        comb_matrix_image = (comb_matrix - cb_min) / (cb_max - cb_min) * 255.0
        comb_matrix_image = tf.cast(comb_matrix_image, tf.uint8)
        comb_matrix_image = tf.reshape(comb_matrix_image, [1, dm_shape[0], dm_shape[1], 1])
        return poses, comb_matrix_image
Project: youtube-8m    Author: wangheda    | Project source | File source
def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     **unused_params):

        shape = model_input.get_shape().as_list()
        frames_sum = tf.reduce_sum(tf.abs(model_input),axis=2)
        frames_true = tf.ones(tf.shape(frames_sum))
        frames_false = tf.zeros(tf.shape(frames_sum))
        frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),[-1,shape[1],1])

        activation_1 = tf.reduce_max(model_input, axis=1)
        activation_2 = tf.reduce_sum(model_input*frames_bool, axis=1)/(tf.reduce_sum(frames_bool, axis=1)+1e-6)
        activation_3 = tf.reduce_min(model_input, axis=1)

        model_input_1, final_probilities_1 = self.sub_moe(activation_1,vocab_size,scopename="_max")
        model_input_2, final_probilities_2 = self.sub_moe(activation_2,vocab_size,scopename="_mean")
        model_input_3, final_probilities_3 = self.sub_moe(activation_3,vocab_size,scopename="_min")
        final_probilities = tf.stack((final_probilities_1,final_probilities_2,final_probilities_3),axis=1)
        weight2d = tf.get_variable("ensemble_weight2d",
                                   shape=[shape[2], 3, vocab_size],
                                   regularizer=slim.l2_regularizer(1.0e-8))
        activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
        weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
        result = {}
        result["prediction_frames"] = tf.reshape(final_probilities,[-1,vocab_size])
        result["predictions"] = tf.reduce_sum(final_probilities*weight,axis=1)
        return result
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss_postprocess(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_postprocess"):
      float_labels = tf.cast(labels, tf.float32)
      predictions_pos = predictions*float_labels + (1-float_labels)
      predictions_neg = predictions*(1-float_labels)
      min_pos = tf.stop_gradient(tf.reduce_min(predictions_pos))
      max_neg = tf.stop_gradient(tf.reduce_max(predictions_neg))
      predictions_pos_mistake = tf.nn.relu(max_neg-predictions_pos)-0.01*tf.nn.relu(predictions_pos-max_neg)
      predictions_neg_mistake = tf.nn.relu(predictions_neg-min_pos)-0.01*tf.nn.relu(min_pos-predictions_neg)
      postprocess_loss = predictions_pos_mistake + predictions_neg_mistake
      return tf.reduce_mean(tf.reduce_sum(postprocess_loss, 1))
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss(self, predictions, labels, margin=0.2, adaptive=3.0, origin=1.0, **unused_params):
    batch_size = FLAGS.batch_size
    num_classes = FLAGS.num_classes
    with tf.name_scope("loss_hinge"):
      # get sim_neg
      mask = tf.cast(labels, tf.float32)
      reverse_mask = 1.0 - mask
      min_true_pred = tf.reduce_min((predictions - 1.0) * mask, axis=1, keep_dims=True) + 1.0
      mask_wrong = tf.stop_gradient(tf.cast(predictions > (min_true_pred - margin), tf.float32) * reverse_mask)
      # get positve samples
      int_labels = tf.cast(labels, tf.int32)
      sample_labels = tf.unstack(int_labels, num=batch_size, axis=0)
      sample_predictions = tf.unstack(predictions, num=batch_size, axis=0)
      positive_predictions = []
      for sample_label, sample_prediction in zip(sample_labels, sample_predictions):
        indices = tf.where(sample_label > 0)
        expanded_indices = tf.tile(indices[:,0], [num_classes])[:num_classes]
        rand_arrange = tf.random_uniform([num_classes], minval=0, maxval=num_classes, dtype=tf.int32)
        positive_indices = tf.stop_gradient(tf.gather(expanded_indices, rand_arrange))
        positive_prediction = tf.gather(sample_prediction, positive_indices)
        positive_predictions.append(positive_prediction)
      positive_predictions = tf.stack(positive_predictions)
      # hinge_loss
      hinge_loss = tf.maximum(predictions - positive_predictions + margin, 0.0)
      adaptive_loss = hinge_loss * mask_wrong
      adaptive_loss = tf.reduce_mean(tf.reduce_sum(adaptive_loss, axis=1))
      origin_loss = hinge_loss * reverse_mask
      origin_loss = tf.reduce_mean(tf.reduce_sum(origin_loss, axis=1))
      loss = adaptive * adaptive_loss + origin * origin_loss
      return loss
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss(self, predictions, labels, topk=20, **unused_params):
    with tf.name_scope("loss_xent_batch"):
      batch_agreement = FLAGS.batch_agreement
      epsilon = 10e-6
      float_batch_size = float(FLAGS.batch_size)

      topk_predictions, _ = tf.nn.top_k(predictions, k=20)
      min_topk_predictions = tf.reduce_min(topk_predictions, axis=1, keep_dims=True)
      topk_mask = tf.cast(predictions >= min_topk_predictions, dtype=tf.float32)

      float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      # minimum positive predictions in topk
      positive_predictions = (predictions * float_labels * topk_mask) + 1.0 - (float_labels * topk_mask)
      min_pp = tf.reduce_min(positive_predictions)

      # maximum negative predictions
      negative_predictions = predictions * (1.0 - float_labels)
      max_np = tf.reduce_max(negative_predictions)

      # 1s that fall under top-k
      false_negatives = tf.cast(predictions < min_topk_predictions, tf.float32) * float_labels
      # 0s that grow over 1s in top-k
      false_positives = tf.cast(predictions > min_pp, tf.float32) * (1.0 - float_labels) * topk_mask

      weight = (false_negatives + false_positives) * batch_agreement + 1.0
      weight = tf.stop_gradient(weight)
      print(weight)
      return tf.reduce_mean(tf.reduce_sum(weight * cross_entropy_loss, 1))
Project: youtube-8m    Author: wangheda    | Project source | File source
def multiview(self, cnn_output, axis=1):
    max_view = tf.reduce_max(cnn_output, axis=1)
    mean_view = tf.reduce_mean(cnn_output, axis=1)
    min_view = tf.reduce_min(cnn_output, axis=1)
    multi_view = tf.concat([max_view, mean_view, min_view], axis=1)
    return multi_view
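A hypothetical call from inside the same class (frame count and feature size are illustrative):

cnn_output = tf.placeholder(tf.float32, [None, 300, 1024])  # batch x frames x features
multi_view = self.multiview(cnn_output)                     # batch x 3072: max, mean, min pooled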
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def min(x, axis=None, keepdims=False):
    return tf.reduce_min(x, axis=None if axis is None else [axis], keep_dims = keepdims)
Project: lung-cancer-detector    Author: YichenGong    | Project source | File source
def add_weights_summary(weights, name=""):
    with tf.name_scope(name+"_summary"):
        mean = tf.reduce_mean(weights)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(weights - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(weights))
        tf.summary.scalar('min', tf.reduce_min(weights))
        tf.summary.histogram('histogram', weights)
Project: CycleGAN-Tensorflow-PyTorch-Simple    Author: LynnHo    | Project source | File source
def summary(tensor, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
    """ Attach a lot of summaries to a Tensor. """

    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', tensor.name)
    tensor_name = re.sub(':', '-', tensor_name)

    with tf.name_scope('summary_' + tensor_name):
        summaries = []
        if len(tensor._shape) == 0:
            summaries.append(tf.summary.scalar(tensor_name, tensor))
        else:
            if 'mean' in summary_type:
                mean = tf.reduce_mean(tensor)
                summaries.append(tf.summary.scalar(tensor_name + '/mean', mean))
            if 'stddev' in summary_type:
                mean = tf.reduce_mean(tensor)
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
                summaries.append(tf.summary.scalar(tensor_name + '/stddev', stddev))
            if 'max' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/max', tf.reduce_max(tensor)))
            if 'min' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/min', tf.reduce_min(tensor)))
            if 'sparsity' in summary_type:
                summaries.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(tensor)))
            if 'histogram' in summary_type:
                summaries.append(tf.summary.histogram(tensor_name, tensor))
        return tf.summary.merge(summaries)
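A hypothetical call requesting a subset of the statistics (assumes re is imported at module level; the weight shape is illustrative):

w = tf.Variable(tf.random_normal([3, 3, 64, 128]), name='conv1/weights')
merged = summary(w, summary_type=['mean', 'max', 'min', 'histogram'])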
Project: handwritten-sequence-tensorflow    Author: johnsmithm    | Project source | File source
def variable_summaries(var):
      """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
      with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
          stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
Project: agent-trainer    Author: lopespm    | Project source | File source
def _convolutional_layer(self, input, patch_size, stride, input_channels, output_channels, bias_init_value, scope_name):
        with tf.variable_scope(scope_name) as scope:
            weights = tf.get_variable(name='weights',
                                  shape=[patch_size, patch_size, input_channels, output_channels],
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())
            biases = tf.Variable(name='biases', initial_value=tf.constant(value=bias_init_value, shape=[output_channels]))
            conv = tf.nn.conv2d(input, weights, [1, stride, stride, 1], padding='SAME')

            linear_rectification_bias = tf.nn.bias_add(conv, biases)
            output = tf.nn.relu(linear_rectification_bias, name=scope.name)

            grid_x = output_channels // 4
            grid_y = 4 * input_channels
            kernels_image_grid = self._create_kernels_image_grid(weights, (grid_x, grid_y))
            tf.image_summary(scope_name + '/features', kernels_image_grid, max_images=1)

            if "_conv1" in scope_name:
                x_min = tf.reduce_min(weights)
                x_max = tf.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tf.image.convert_image_dtype(weights_0_to_1, dtype=tf.uint8)

                # to tf.image_summary format [batch_size, height, width, channels]
                weights_transposed = tf.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])

                tf.image_summary(scope_name + '/features', weights_transposed[:,:,:,0:1], max_images=32)

        return output
Project: Supply-demand-forecasting    Author: LevinJ    | Project source | File source
def variable_summaries(self, var, name):
        """Attach a lot of summaries to a Tensor."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.scalar_summary('mean/' + name, mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))  # mean, not sum, for a true stddev
            tf.scalar_summary('stddev/' + name, stddev)
            tf.scalar_summary('max/' + name, tf.reduce_max(var))
            tf.scalar_summary('min/' + name, tf.reduce_min(var))
            tf.histogram_summary(name, var)
        return
Project: tensorflow-prebuilt-classifier    Author: recursionbane    | Project source | File source
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
Project: seglink    Author: bgshih    | Project source | File source
def print_tensor_summary(tensor, tag=None, n_print=21):
  tensor_min = tf.reduce_min(tensor)
  tensor_max = tf.reduce_max(tensor)
  tensor_avg = tf.reduce_mean(tensor)
  tensor_zero_fraction = tf.nn.zero_fraction(tensor)
  tensor_shape = tf.shape(tensor)
  tag = tag or tensor.name
  tensor = tf.Print(tensor,
                    [tensor_min, tensor_max, tensor_avg, tensor_zero_fraction, tensor_shape, tensor],
                    message=(tag + ' Min, max, mean, sparsity, shape, value:'),
                    summarize=n_print)
  return tensor
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | Project source | File source
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
Project: keras    Author: GeekLiB    | Project source | File source
def min(x, axis=None, keepdims=False):
    '''Minimum value in a tensor.
    '''
    axis = _normalize_axis(axis, ndim(x))
    return tf.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
Project: chemblnet    Author: jaak-s    | Project source | File source
def summarize(self, sess):
        mean_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.mean)), tf.reduce_min(self.mean), tf.reduce_max(self.mean)])
        var_info  = sess.run([tf.sqrt(tf.nn.l2_loss(self.var)), tf.reduce_min(self.var), tf.reduce_max(self.var)])
        prec_info = sess.run([tf.reduce_min(self.prec), tf.reduce_max(self.prec)])
        return ("l2(%.2e), prec(%.2e, %.2e) std(%.2e, %.2e)" % (mean_info[0], prec_info[0], prec_info[1], np.sqrt(var_info[1]), np.sqrt(var_info[2])) )
Project: chemblnet    Author: jaak-s    | Project source | File source
def summarize(self, sess):
        mean_info = sess.run([tf.sqrt(tf.nn.l2_loss(self.mean)), tf.reduce_min(self.mean), tf.reduce_max(self.mean)])
        var_info  = sess.run([tf.sqrt(tf.nn.l2_loss(self.var)), tf.reduce_min(self.var), tf.reduce_max(self.var)])
        prec_info = sess.run([self.prec.min(), self.prec.max()])
        return ("l2(%.2e), prec(%.2e, %.2e) std(%.2e, %.2e)" % (mean_info[0], prec_info[0], prec_info[1], np.sqrt(var_info[1]), np.sqrt(var_info[2])) )
Project: HyperGAN    Author: 255BITS    | Project source | File source
def batch_accuracy(a, b):
    "Each point of a is measured against the closest point on b.  Distance differences are added together."
    tiled_a = a
    tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])

    tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])

    tiled_b = b
    tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
    tiled_b = tf.tile(tiled_b, [int(tiled_b.get_shape()[0]), 1, 1])

    difference = tf.abs(tiled_a-tiled_b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum(difference, axis=0)
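A hypothetical call on two tiny point sets (values illustrative):

a = tf.constant([[0.0, 0.0], [2.0, 2.0]])
b = tf.constant([[0.0, 1.0], [1.0, 0.0]])
with tf.Session() as sess:
    print(sess.run(batch_accuracy(a, b)))   # 2.0 here: per-coordinate minima, summed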
Project: HyperGAN    Author: 255BITS    | Project source | File source
def accuracy(a, b):
    "Each point of a is measured against the closest point on b.  Distance differences are added together."
    difference = tf.abs(a-b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum( tf.reduce_sum(difference, axis=0) , axis=0)
Project: baselines    Author: openai    | Project source | File source
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
            input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped
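A hypothetical driver; clipoutNeg lives elsewhere in baselines, so a stand-in with the presumed contract (zero out entries at or below the threshold) is sketched here:

def clipoutNeg(vec, threshold=1e-6):
    # stand-in with the presumed contract: keep entries above threshold, zero the rest
    mask = tf.cast(vec > threshold, tf.float32)
    return mask * vec

mat = tf.constant([[1e-8, 0.5], [0.3, 2e-7]])
var = tf.Variable(mat, name='factor')
clipped = detectMinVal(mat, var, threshold=1e-6, name='factor')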
Project: image_recognition    Author: tue-robotics    | Project source | File source
def variable_summaries(var, name):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean/' + name, mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev/' + name, stddev)
    tf.summary.scalar('max/' + name, tf.reduce_max(var))
    tf.summary.scalar('min/' + name, tf.reduce_min(var))
    tf.summary.histogram(name, var)