Python tensorflow module: cond() example source code

We extracted the following 47 code examples from open-source Python projects to illustrate how to use tensorflow.cond().
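Before the project examples, here is a minimal, self-contained sketch of the call pattern (assuming TensorFlow 1.x graph mode, which all of the snippets below use): the predicate must be a scalar boolean tensor, both branches are zero-argument callables, and tf.cond() returns the outputs of whichever branch the predicate selects at run time.

import tensorflow as tf

is_training = tf.placeholder(tf.bool, shape=[], name='is_training')
x = tf.constant([1.0, 2.0, 3.0])

# Both branch functions are traced into the graph; only one is executed per run.
y = tf.cond(is_training,
            lambda: x * 2.0,   # branch taken when is_training is True
            lambda: x)         # branch taken when is_training is False

with tf.Session() as sess:
    print(sess.run(y, feed_dict={is_training: True}))   # [2. 4. 6.]
    print(sess.run(y, feed_dict={is_training: False}))  # [1. 2. 3.]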

Project: zhusuan    Author: thu-ml    | Project source | File source
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p
Project: GELUs    Author: hendrycks    | Project source | File source
def ae(x):
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map

    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")

    h1 = f(tf.matmul(x, W['1']) + b['1'])
    h2 = f(tf.matmul(h1, W['2']) + b['2'])
    h3 = f(tf.matmul(h2, W['3']) + b['3'])
    h4 = f(tf.matmul(h3, W['4']) + b['4'])
    h5 = f(tf.matmul(h4, W['5']) + b['5'])
    h6 = f(tf.matmul(h5, W['6']) + b['6'])
    h7 = f(tf.matmul(h6, W['7']) + b['7'])
    return tf.matmul(h7, W['8']) + b['8']
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def switch(condition, then_expression, else_expression):
    '''Switches between two operations depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    '''
    x_shape = copy.copy(then_expression.get_shape())
    x = tf.cond(tf.cast(condition, 'bool'),
                lambda: then_expression,
                lambda: else_expression)
    x.set_shape(x_shape)
    return x

# Extras
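A hypothetical usage sketch of the switch() helper above (tensor names here are illustrative): it selects between two same-shaped tensors and then restores the static shape on the result, which a bare tf.cond() can lose.

use_target = tf.placeholder(tf.bool, shape=[])        # scalar condition
q_online = tf.placeholder(tf.float32, [None, 4])
q_target = tf.placeholder(tf.float32, [None, 4])
q_values = switch(use_target, q_target, q_online)     # static shape [None, 4] is preserved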
Project: pointnet    Author: charlesq34    | Project source | File source
def dropout(inputs,
            is_training,
            scope,
            keep_prob=0.5,
            noise_shape=None):
  """ Dropout layer.

  Args:
    inputs: tensor
    is_training: boolean tf.Variable
    scope: string
    keep_prob: float in [0,1]
    noise_shape: list of ints

  Returns:
    tensor variable
  """
  with tf.variable_scope(scope) as sc:
    outputs = tf.cond(is_training,
                      lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
                      lambda: inputs)
    return outputs
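A hypothetical usage of the dropout() wrapper above: is_training must be a scalar boolean tensor (a placeholder or tf.Variable), not a Python bool, because tf.cond() builds both branches into the graph and picks one at run time.

is_training_pl = tf.placeholder(tf.bool, shape=[], name='is_training')
net = tf.placeholder(tf.float32, [None, 1024], name='features')
net = dropout(net, is_training_pl, scope='dp1', keep_prob=0.5)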
Project: HandDetection    Author: YunqiuXu    | Project source | File source
def batch_norm_layer(self, to_be_normalized, is_training):
    if is_training:
      train_phase = tf.constant(1)
    else:
      train_phase = tf.constant(-1)
    beta = tf.Variable(tf.constant(0.0, shape=[to_be_normalized.shape[-1]]), name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[to_be_normalized.shape[-1]]), name='gamma', trainable=True)
    # axises = np.arange(len(to_be_normalized.shape) - 1) # change to apply tensorflow 1.3
    axises = [0,1,2]

    print("start nn.moments")
    print("axises : " + str(axises))
    batch_mean, batch_var = tf.nn.moments(to_be_normalized, axises, name='moments')
    print("nn.moments successful")
    ema = tf.train.ExponentialMovingAverage(decay=0.5)

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(train_phase > 0, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var))) # if is training --> update
    normed = tf.nn.batch_normalization(to_be_normalized, mean, var, beta, gamma, 1e-3)
    return normed
Project: tf-crnn    Author: solivr    | Project source | File source
def image_reading(path: str, resized_size: Tuple[int, int]=None, data_augmentation: bool=False,
                  padding: bool=False) -> Tuple[tf.Tensor, tf.Tensor]:
    # Read image
    image_content = tf.read_file(path, name='image_reader')
    image = tf.cond(tf.equal(tf.string_split([path], '.').values[1], tf.constant('jpg', dtype=tf.string)),
                    true_fn=lambda: tf.image.decode_jpeg(image_content, channels=1, try_recover_truncated=True), # TODO channels = 3 ?
                    false_fn=lambda: tf.image.decode_png(image_content, channels=1), name='image_decoding')

    # Data augmentation
    if data_augmentation:
        image = augment_data(image)

    # Padding
    if padding:
        with tf.name_scope('padding'):
            image, img_width = padding_inputs_width(image, resized_size, increment=CONST.DIMENSION_REDUCTION_W_POOLING)
    # Resize
    else:
        image = tf.image.resize_images(image, size=resized_size)
        img_width = tf.shape(image)[1]

    with tf.control_dependencies([tf.assert_equal(image.shape[:2], resized_size)]):
        return image, img_width
Project: tf-crnn    Author: solivr    | Project source | File source
def random_rotation(img: tf.Tensor, max_rotation: float=0.1, crop: bool=True) -> tf.Tensor:  # from SeguinBe
    with tf.name_scope('RandomRotation'):
        rotation = tf.random_uniform([], -max_rotation, max_rotation)
        rotated_image = tf.contrib.image.rotate(img, rotation, interpolation='BILINEAR')
        if crop:
            rotation = tf.abs(rotation)
            original_shape = tf.shape(rotated_image)[:2]
            h, w = original_shape[0], original_shape[1]
            # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae
            old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])
            old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)
            new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2*rotation)
            new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)
            new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])
            new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
            bb_begin = tf.cast(tf.ceil((h-new_h)/2), tf.int32), tf.cast(tf.ceil((w-new_w)/2), tf.int32)
            rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]

            # If crop removes the entire image, keep the original image
            rotated_image = tf.cond(tf.equal(tf.size(rotated_image_crop), 0),
                                    true_fn=lambda: img,
                                    false_fn=lambda: rotated_image_crop)

        return rotated_image
Project: paraphrase-id-tensorflow    Author: nelson-liu    | Project source | File source
def __call__(self, inputs, state, scope=None):
        # Get the dropped-out outputs and state
        outputs_do, new_state_do = super(SwitchableDropoutWrapper,
                                         self).__call__(
                                             inputs, state, scope=scope)
        tf.get_variable_scope().reuse_variables()
        # Get the un-dropped-out outputs and state
        outputs, new_state = self._cell(inputs, state, scope)

        # Set the outputs and state to be the dropped out version if we are
        # training, and no dropout if we are not training.
        outputs = tf.cond(self.is_train, lambda: outputs_do,
                          lambda: outputs * (self._output_keep_prob))
        if isinstance(state, tuple):
            new_state = state.__class__(
                *[tf.cond(self.is_train, lambda: new_state_do_i,
                          lambda: new_state_i)
                  for new_state_do_i, new_state_i in
                  zip(new_state_do, new_state)])
        else:
            new_state = tf.cond(self.is_train, lambda: new_state_do,
                                lambda: new_state)
        return outputs, new_state
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: tensorflow_ocr    Author: BowieHsu    | Project source | File source
def OHNM_single_image(scores, n_pos, neg_mask):
    """Online Hard Negative Mining.
    scores: the scores of being predicted as the negative class
    n_pos: the number of positive samples
    neg_mask: mask of negative samples
    Return:
    the mask of selected negative samples.
    if n_pos == 0, no negative samples will be selected.
    """
    def has_pos():
        n_neg = n_pos * 3
        max_neg_entries = tf.reduce_sum(tf.cast(neg_mask, tf.int32))
        n_neg = tf.minimum(n_neg, max_neg_entries)
        n_neg = tf.cast(n_neg, tf.int32)
        neg_conf = tf.boolean_mask(scores, neg_mask)
        vals, _ = tf.nn.top_k(-neg_conf, k=n_neg)
        threshold = vals[-1]  # a negative value
        selected_neg_mask = tf.logical_and(neg_mask, scores <= -threshold)
        return tf.cast(selected_neg_mask, tf.float32)

    def no_pos():
        return tf.zeros_like(neg_mask, tf.float32)

    return tf.cond(n_pos > 0, has_pos, no_pos)
Project: tf_classification    Author: visipedia    | Project source | File source
def _largest_size_at_most(height, width, largest_side):
  """Computes new shape with the largest side equal to `largest_side`.
  Computes new shape with the largest side equal to `largest_side` while
  preserving the original aspect ratio.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    largest_side: A python integer or scalar `Tensor` indicating the size of
      the largest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  largest_side = tf.to_float(largest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: largest_side / height,
                  lambda: largest_side / width)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
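A hypothetical usage of _largest_size_at_most() above (the image placeholder is illustrative): scale an image so its larger side becomes 512 pixels while preserving the aspect ratio.

image = tf.placeholder(tf.float32, [None, None, 3])
shape = tf.shape(image)
new_height, new_width = _largest_size_at_most(shape[0], shape[1], 512)
resized = tf.image.resize_images(image, [new_height, new_width])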
Project: WassersteinGAN.tensorflow    Author: shekkizh    | Project source | File source
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: qrn    Author: uwnlp    | Project source | File source
def pre(self, inputs, scope=None):
        """Preprocess inputs to be used by the cell. Assumes [N, J, *]
        [x, u]"""
        is_train = self._is_train
        keep_prob = self._keep_prob
        gate_size = self._gate_size
        with tf.variable_scope(scope or "pre"):
            x, u, _, _ = tf.split(2, 4, tf.slice(inputs, [0, 0, gate_size], [-1, -1, -1]))  # [N, J, d]
            a_raw = linear([x * u], gate_size, True, scope='a_raw', var_on_cpu=self._var_on_cpu,
                           wd=self._wd, initializer=self._initializer)
            a = tf.sigmoid(a_raw - self._forget_bias, name='a')
            if keep_prob < 1.0:
                x = tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob), lambda: x)
                u = tf.cond(is_train, lambda: tf.nn.dropout(u, keep_prob), lambda: u)
            v_t = tf.nn.tanh(linear([x, u], self._num_units, True,
                             var_on_cpu=self._var_on_cpu, wd=self._wd, scope='v_raw'), name='v')
            new_inputs = tf.concat(2, [a, x, u, v_t])  # [N, J, 3*d + 1]
        return new_inputs
Project: zhusuan    Author: thu-ml    | Project source | File source
def tune(self, acceptance_rate, fresh_start):
        def adapt_stepsize():
            new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
            rate1 = tf.div(1.0, new_step + self.t0)
            new_h_bar = tf.assign(
                self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
                rate1 * (self.delta - acceptance_rate))
            log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
            rate = tf.pow(new_step, -self.kappa)
            new_log_epsilon_bar = tf.assign(
                self.log_epsilon_bar,
                rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
                self.log_epsilon_bar)
            with tf.control_dependencies([new_log_epsilon_bar]):
                new_log_epsilon = tf.identity(log_epsilon)

            return tf.exp(new_log_epsilon)

        c = tf.cond(self.adapt_step_size,
                    adapt_stepsize,
                    lambda: tf.exp(self.log_epsilon_bar))

        return c
Project: zhusuan    Author: thu-ml    | Project source | File source
def _adapt_mass(self, t, num_chain_dims):
        ewmv = ExponentialWeightedMovingVariance(
            self.mass_decay, self.data_shapes, num_chain_dims)
        new_mass = tf.cond(self.adapt_mass,
                           lambda: ewmv.get_updated_precision(self.q),
                           lambda: ewmv.precision())
        if not isinstance(new_mass, list):
            new_mass = [new_mass]

        # print('New mass is = {}'.format(new_mass))
        # TODO incorrect shape?
        # print('New mass={}'.format(new_mass))
        # print('q={}, NMS={}'.format(self.q[0].get_shape(),
        #                             new_mass[0].get_shape()))
        with tf.control_dependencies(new_mass):
            current_mass = tf.cond(
                tf.less(tf.to_int32(t), self.mass_collect_iters),
                lambda: [tf.ones(shape) for shape in self.data_shapes],
                lambda: new_mass)
        if not isinstance(current_mass, list):
            current_mass = [current_mass]
        return current_mass
Project: zhusuan    Author: thu-ml    | Project source | File source
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` has the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a BUG of Tensorflow for not doing static shape inference
    # right in nested tf.cond()'s, so we are not comparing x and y's
    # shape directly but working with their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
Project: AutoPortraitMatting    Author: PetroWu    | Project source | File source
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: AutoPortraitMatting    Author: PetroWu    | Project source | File source
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: Multi-channel-speech-extraction-using-DNN    Author: zhr1201    | Project source | File source
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
        '''
        Loss definition
        Only speech inference loss is defined and works quite well
        Add VAD cross entropy loss if you want
        '''
        loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
        loss_o = loss_v1
        reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # ipdb.set_trace()
        loss_v = loss_o + tf.add_n(reg_loss)
        tf.scalar_summary('loss', loss_v)
        # loss_merge = tf.cond(
        #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
        #     lambda: tf.scalar_summary('loss', loss_v))
        return loss_v, loss_o
        # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
Project: DocumentSegmentation    Author: SeguinBe    | Project source | File source
def data_augmentation_fn(input_image: tf.Tensor, label_image: tf.Tensor) -> (tf.Tensor, tf.Tensor):
    with tf.name_scope('DataAugmentation'):
        with tf.name_scope('random_flip_lr'):
            sample = tf.random_uniform([], 0, 1)
            label_image = tf.cond(sample > 0.5, lambda: tf.image.flip_left_right(label_image), lambda: label_image)
            input_image = tf.cond(sample > 0.5, lambda: tf.image.flip_left_right(input_image), lambda: input_image)
        with tf.name_scope('random_flip_ud'):
            sample = tf.random_uniform([], 0, 1)
            label_image = tf.cond(sample > 0.5, lambda: tf.image.flip_up_down(label_image), lambda: label_image)
            input_image = tf.cond(sample > 0.5, lambda: tf.image.flip_up_down(input_image), lambda: input_image)

        chanels = input_image.get_shape()[-1]
        input_image = tf.image.random_contrast(input_image, lower=0.8, upper=1.0)
        if chanels == 3:
            input_image = tf.image.random_hue(input_image, max_delta=0.1)
            input_image = tf.image.random_saturation(input_image, lower=0.8, upper=1.2)
        return input_image, label_image
Project: DocumentSegmentation    Author: SeguinBe    | Project source | File source
def rotate_crop(img, rotation, crop=True, interpolation='NEAREST'):
    with tf.name_scope('RotateCrop'):
        rotated_image = tf_rotate(img, rotation, interpolation)
        if crop:
            rotation = tf.abs(rotation)
            original_shape = tf.shape(rotated_image)[:2]
            h, w = original_shape[0], original_shape[1]
            # see https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders for formulae
            old_l, old_s = tf.cond(h > w, lambda: [h, w], lambda: [w, h])
            old_l, old_s = tf.cast(old_l, tf.float32), tf.cast(old_s, tf.float32)
            new_l = (old_l * tf.cos(rotation) - old_s * tf.sin(rotation)) / tf.cos(2 * rotation)
            new_s = (old_s - tf.sin(rotation) * new_l) / tf.cos(rotation)
            new_h, new_w = tf.cond(h > w, lambda: [new_l, new_s], lambda: [new_s, new_l])
            new_h, new_w = tf.cast(new_h, tf.int32), tf.cast(new_w, tf.int32)
            bb_begin = tf.cast(tf.ceil((h - new_h) / 2), tf.int32), tf.cast(tf.ceil((w - new_w) / 2), tf.int32)
            rotated_image_crop = rotated_image[bb_begin[0]:h - bb_begin[0], bb_begin[1]:w - bb_begin[1], :]

            # If crop removes the entire image, keep the original image
            rotated_image = tf.cond(tf.equal(tf.size(rotated_image_crop), 0),
                                    true_fn=lambda: img,
                                    false_fn=lambda: rotated_image_crop)
        return rotated_image
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | Project source | File source
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | Project source | File source
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
        del Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
Project: bi-att-flow    Author: allenai    | Project source | File source
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out
Project: deep_unsupervised_posets    Author: asanakoy    | Project source | File source
def conv_relu(self, input_tensor, kernel_size, kernels_num, stride, batch_norm=True,
                  group=1, name=None):
        with tf.variable_scope(name) as scope:
            assert int(input_tensor.get_shape()[3]) % group == 0
            num_input_channels = int(input_tensor.get_shape()[3]) / group
            w, b = self.get_conv_weights(kernel_size, num_input_channels, kernels_num)
            conv = Convnet.conv(input_tensor, w, b, stride, padding="SAME", group=group)
            if batch_norm:
                conv = tf.cond(self.is_phase_train,
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=True,
                                                           trainable=True,
                                                           reuse=None,
                                                           scope=scope),
                               lambda: tflayers.batch_norm(conv,
                                                           decay=self.batch_norm_decay,
                                                           is_training=False,
                                                           trainable=True,
                                                           reuse=True,
                                                           scope=scope))
            conv = tf.nn.relu(conv, name=name)
        return conv
Project: YellowFin    Author: JianGoForIt    | Project source | File source
def update_hyper_param(self):
    assign_hyper_ops = []
    self._mu = tf.identity(tf.cond(
      self._do_tune, lambda: self.get_mu_tensor(),
      lambda: self._mu_var))
    with tf.control_dependencies([self._mu]):
      self._lr = tf.identity(tf.cond(
        self._do_tune, lambda: self.get_lr_tensor(),
        lambda: self._lr_var))

    with tf.control_dependencies([self._mu, self._lr]):
      if self._use_unsmoothed_lr_mu:
        assign_hyper_ops.append(tf.assign(self._mu_var, self._mu) )
        assign_hyper_ops.append(tf.assign(self._lr_var, self._lr) )
      else:
        self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
        self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
        with tf.control_dependencies([self._mu, self._lr] ):
          assign_hyper_ops.append(tf.assign(self._mu_var, self._mu) )
          assign_hyper_ops.append(tf.assign(self._lr_var, self._lr) )
    assign_hyper_op = tf.group(*assign_hyper_ops)
    return assign_hyper_op
Project: relaax    Author: deeplearninc    | Project source | File source
def build_graph(self):
        self.ph_local_step = tf.placeholder(tf.int64, [])
        self.ph_q_value = tf.placeholder(tf.float32, [None, dqn_config.config.output.action_size])

        if dqn_config.config.eps.stochastic:
            decay_steps = int(np.random.uniform(*dqn_config.config.eps.decay_steps))
        else:
            decay_steps = dqn_config.config.eps.decay_steps

        eps = tf.train.polynomial_decay(dqn_config.config.eps.initial,
                                        self.ph_local_step,
                                        decay_steps,
                                        dqn_config.config.eps.end)

        return tf.cond(tf.less(tf.random_uniform([]), eps),
                       lambda: tf.random_uniform([], 0, dqn_config.config.output.action_size, dtype=tf.int32),
                       lambda: tf.cast(tf.squeeze(tf.argmax(self.ph_q_value, axis=1)), tf.int32))
Project: supic    Author: Hirico    | Project source | File source
def read_image(self, image_path):
        # tf.decode_image does not return the image size, this is an ugly workaround to handle both jpeg and png
        path_length = string_length_tf(image_path)[0]
        file_extension = tf.substr(image_path, path_length - 3, 3)
        file_cond = tf.equal(file_extension, 'jpg')

        image  = tf.cond(file_cond, lambda: tf.image.decode_jpeg(tf.read_file(image_path)), lambda: tf.image.decode_png(tf.read_file(image_path)))

        # if the dataset is cityscapes, we crop the last fifth to remove the car hood
        if self.dataset == 'cityscapes':
            o_height    = tf.shape(image)[0]
            crop_height = (o_height * 4) / 5
            image  =  image[:crop_height,:,:]

        image  = tf.image.convert_image_dtype(image,  tf.float32)
        image  = tf.image.resize_images(image,  [self.params.height, self.params.width], tf.image.ResizeMethod.AREA)

        return image
Project: tefla    Author: litan    | Project source | File source
def static_cond(pred, fn1, fn2):
    """Return either fn1() or fn2() based on the boolean value of `pred`.

    Same signature as `control_flow_ops.cond()` but requires pred to be a bool.

    Args:
        pred: A value determining whether to return the result of `fn1` or `fn2`.
        fn1: The callable to be performed if pred is true.
        fn2: The callable to be performed if pred is false.

    Returns:
        Tensors returned by the call to either `fn1` or `fn2`.

    Raises:
        TypeError: if `fn1` or `fn2` is not callable.
    """
    if not callable(fn1):
        raise TypeError('fn1 must be callable.')
    if not callable(fn2):
        raise TypeError('fn2 must be callable.')
    if pred:
        return fn1()
    else:
        return fn2()
Project: tefla    Author: litan    | Project source | File source
def smart_cond(pred, fn1, fn2, name=None):
    """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

    If `pred` is bool or has a constant value it would use `static_cond`,
     otherwise it would use `tf.cond`.

    Args:
        pred: A scalar determining whether to return the result of `fn1` or `fn2`.
        fn1: The callable to be performed if pred is true.
        fn2: The callable to be performed if pred is false.
        name: Optional name prefix when using tf.cond
    Returns:
        Tensors returned by the call to either `fn1` or `fn2`.
    """
    pred_value = constant_value(pred)
    if pred_value is not None:
        # Use static_cond if pred has a constant value.
        return static_cond(pred_value, fn1, fn2)
    else:
        # Use dynamic cond otherwise.
        return control_flow_ops.cond(pred, fn1, fn2, name)
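A hypothetical usage sketch contrasting the two paths of smart_cond() above: a predicate with a known constant value is resolved while building the graph via static_cond(), whereas a placeholder predicate falls back to a real tf.cond() node.

x = tf.constant(1.0)

# Constant predicate: resolved at graph-construction time, no cond op is created.
y_static = smart_cond(tf.constant(True), lambda: x + 1.0, lambda: x - 1.0)

# Dynamic predicate: a tf.cond() op is built and evaluated at run time.
flag = tf.placeholder(tf.bool, shape=[])
y_dynamic = smart_cond(flag, lambda: x + 1.0, lambda: x - 1.0)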
Project: terngrad    Author: wenwei202    | Project source | File source
def decode_from_ternary_gradients(grads_and_vars, scalers, shapes):
  """Decode each gradient tensor."""
  with tf.name_scope('ternary_decoder'):
    gradients, variables = zip(*grads_and_vars)
    floating_gradients = []
    for gradient, variable, scaler, shape in zip(gradients, variables, scalers, shapes):
      if gradient is None:
        floating_gradients.append(None)
      # gradient is encoded, so we use variable to check its size
      # We also assume dtype of variable and gradient is the same
      floating_gradient = tf.cond(tf.size(variable) < FLAGS.size_to_binarize,
                                 lambda: tf.bitcast(gradient, variable.dtype),
                                 lambda: ternary_decoder(gradient, scaler, shape))
      floating_gradients.append(floating_gradient)

    return list(zip(floating_gradients, variables))
Project: terngrad    Author: wenwei202    | Project source | File source
def clip_gradients_by_stddev(grads_and_vars, clip_factor = 2.5):
    """ Clip gradients to [-clip_factor*stddev, clip_factor*stddev]."""
    gradients, variables = zip(*grads_and_vars)
    clipped_gradients = []
    for gradient in gradients:
        if gradient is None:
            clipped_gradients.append(None)
            continue

        mean_gradient = tf.reduce_mean(gradient)
        stddev_gradient = tf.sqrt(tf.reduce_mean(tf.square(gradient - mean_gradient)))
        #clipped_gradient = tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient)
        clipped_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                               lambda: gradient,
                               lambda: tf.clip_by_value(gradient, -clip_factor * stddev_gradient, clip_factor * stddev_gradient))

        clipped_gradients.append(clipped_gradient)
    return list(zip(clipped_gradients, variables))
Project: antgo    Author: jianzfb    | Project source | File source
def tf_random_aspect_resize(image, label, low_val=1.0, upper_val=1.5):
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]

  # 1~1.5
  which_side = tf.to_float(tf.random_uniform([1]))[0]
  multi_val = tf.to_float(tf.random_uniform([1]))[0] * (upper_val - low_val) + low_val

  new_height = tf.cond(which_side > 0.5, lambda: tf.to_float(height), lambda: tf.to_float(height) * multi_val)
  new_width = tf.cond(which_side <= 0.5, lambda: tf.to_float(width), lambda: tf.to_float(width) * multi_val)

  new_height = tf.to_int32(new_height)
  new_width = tf.to_int32(new_width)

  image = tf.expand_dims(image, 0)
  label = tf.expand_dims(label, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width], align_corners=False)
  resized_image = tf.cast(resized_image, tf.uint8)
  resized_label = tf.image.resize_nearest_neighbor(label, [new_height, new_width], align_corners=False)
  resized_label = tf.cast(resized_label, tf.uint8)
  resized_image = tf.squeeze(resized_image, 0)
  resized_label = tf.squeeze(resized_label, 0)
  return resized_image, resized_label
Project: Chinese-QA    Author: distantJing    | Project source | File source
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
           is_train=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    flat_args = [flatten(arg, 1) for arg in args]
    if input_keep_prob < 1.0:
        assert is_train is not None
        flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, input_keep_prob), lambda: arg)
                     for arg in flat_args]
    flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start, scope=scope)
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
    if wd:
        add_wd(wd)

    return out
Project: LiTeFlow    Author: petrux    | Project source | File source
def next_inp(self, time, output):
        """Returns the next input.

        Arguments:
          time: a `int` or unit `Tensor` representing the current timestep.
          output: a `2D Tensor` of shape `[batch_size, output_size]` representing
            the current output.

        *NOTE* that at time `t+1` the desired decoder input is the output
        from the previous step, `t`; this means that at timestep `t` the next
        input is the desired output for that very timestep, if decoder
        inputs have been provided -- otherwise it is just the current output.
        """
        if self._inputs_ta:
            output = tf.cond(
                time < self._inputs_ta.size(),
                lambda: self._inputs_ta.read(time),
                lambda: self.zero_output())  # pylint: disable=W0108
        next_inp = ops.fit(output, self._inp_size)
        return next_inp
Project: FCN-GoogLeNet    Author: DeepSegment    | Project source | File source
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: Graph-CNN    Author: fps7806    | Project source | File source
def create_loss_function(self):
        with tf.variable_scope('loss') as scope:
            self.print_ext('Creating loss function and summaries')
            cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.net.current_V, labels=self.net.labels))

            correct_prediction = tf.cast(tf.equal(tf.argmax(self.net.current_V, 1), self.net.labels), tf.float32)
            accuracy = tf.reduce_mean(correct_prediction)

            # we have 2 variables that will keep track of the best accuracy obtained in training/testing batch
            # SHOULD ONLY BE USED IF test_batch_size == ALL TEST SAMPLES
            self.max_acc_train = tf.Variable(tf.zeros([]), name="max_acc_train")
            self.max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
            max_acc = tf.cond(self.net.is_training, lambda: tf.assign(self.max_acc_train, tf.maximum(self.max_acc_train, accuracy)), lambda: tf.assign(self.max_acc_test, tf.maximum(self.max_acc_test, accuracy)))

            tf.add_to_collection('losses', cross_entropy)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.scalar('max_accuracy', max_acc)
            tf.summary.scalar('cross_entropy', cross_entropy)

            # if silent == false display these statistics:
            self.reports['accuracy'] = accuracy
            self.reports['max acc.'] = max_acc
            self.reports['cross_entropy'] = cross_entropy

    # check if the model has a saved iteration and return the latest iteration step
Project: Graph-CNN    Author: fps7806    | Project source | File source
def create_data(self):
        with tf.device("/cpu:0"):
            with tf.variable_scope('input') as scope:
                self.print_ext('Creating training Tensorflow Tensors')

                vertices = self.graph_vertices[:, self.train_idx, :]
                adjacency = self.graph_adjacency[:, self.train_idx, :, :]
                adjacency = adjacency[:, :, :, self.train_idx]
                labels = self.graph_labels[:, self.train_idx]
                input_mask = np.ones([1, len(self.train_idx), 1]).astype(np.float32)

                train_input = [vertices, adjacency, labels, input_mask]
                train_input = self.create_input_variable(train_input)

                vertices = self.graph_vertices
                adjacency = self.graph_adjacency
                labels = self.graph_labels

                input_mask = np.zeros([1, self.largest_graph, 1]).astype(np.float32)
                input_mask[:, self.test_idx, :] = 1
                test_input = [vertices, adjacency, labels, input_mask]
                test_input = self.create_input_variable(test_input)

                return tf.cond(self.net.is_training, lambda: train_input, lambda: test_input)
Project: Graph-CNN    Author: fps7806    | Project source | File source
def make_bn(input, phase, axis=-1, epsilon=0.001, mask=None, num_updates=None, name=None):
    default_decay = GraphCNNGlobal.BN_DECAY
    with tf.variable_scope(name, default_name='BatchNorm') as scope:
        input_size = input.get_shape()[axis].value
        if axis == -1:
            axis = len(input.get_shape())-1
        axis_arr = [i for i in range(len(input.get_shape())) if i != axis]
        if mask == None:
            batch_mean, batch_var = tf.nn.moments(input, axis_arr)
        else:
            batch_mean, batch_var = tf.nn.weighted_moments(input, axis_arr, mask)
        gamma = make_variable('gamma', input_size, initializer=tf.constant_initializer(1))
        beta = make_bias_variable('bias', input_size)
        ema = tf.train.ExponentialMovingAverage(decay=default_decay, num_updates=num_updates)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        mean, var = tf.cond(phase, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))

        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
Project: EBGAN.tensorflow    Author: shekkizh    | Project source | File source
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: GELUs    Author: hendrycks    | Project source | File source
def model(data_feed):
        h1 = f(tf.matmul(data_feed, w1) + b1)
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, w2) + b2)
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        return tf.matmul(h2, w_out) + b_out
Project: GELUs    Author: hendrycks    | Project source | File source
def feedforward(x):
        h1 = f(tf.matmul(x, W['1']) + b['1'])
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, W['2']) + b['2'])
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        h3 = f(tf.matmul(h2, W['3']) + b['3'])
        h3 = tf.cond(is_training, lambda: tf.nn.dropout(h3, p), lambda: h3)
        h4 = f(tf.matmul(h3, W['4']) + b['4'])
        h4 = tf.cond(is_training, lambda: tf.nn.dropout(h4, p), lambda: h4)
        h5 = f(tf.matmul(h4, W['5']) + b['5'])
        h5 = tf.cond(is_training, lambda: tf.nn.dropout(h5, p), lambda: h5)

        return tf.matmul(h5, W['6']) + b['6']
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def _beam_where(self, cond, x, y):
        assert x.shape.is_compatible_with(y.shape)
        original_static_shape = x.shape
        cond = tf.reshape(cond, [self.batch_size * self._beam_width])
        x = self._merge_batch_beams(x, original_static_shape[2:])
        y = self._merge_batch_beams(y, original_static_shape[2:])
        return self._split_batch_beams(tf.where(cond, x, y), original_static_shape[2:])