Python tensorflow module: variable_op_scope() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use tensorflow.variable_op_scope().
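
tf.variable_op_scope(values, name, default_name, ...) opens a variable scope named `name` when one is supplied, and otherwise a uniquified `default_name`; in TensorFlow 1.0 it was folded into tf.variable_scope. As a minimal sketch of the calling pattern (assuming TensorFlow 0.x, where the function still exists; `linear` is an illustrative helper, not taken from the projects below):

import tensorflow as tf

def linear(x, num_out, scope=None, reuse=None):
    # Scope is `scope` if supplied, otherwise a uniquified 'Linear'.
    with tf.variable_op_scope([x], scope, 'Linear', reuse=reuse):
        num_in = x.get_shape().as_list()[-1]
        w = tf.get_variable('weight', [num_in, num_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        return tf.matmul(x, w)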

Project: tf_practice    Author: juho-lee
def batch_norm(input, is_train, scope=None, reuse=None, decay=0.9):
    """Batch normalization that tracks population statistics with an
    exponential moving average and switches between batch and moving
    statistics via tf.cond(is_train, ...)."""
    shape = input.get_shape()
    num_out = shape[-1]

    with tf.variable_op_scope([input], scope, 'BN', reuse=reuse):
        beta = tf.get_variable('beta', [num_out],
                initializer=tf.constant_initializer(0.0),
                trainable=True)
        gamma = tf.get_variable('gamma', [num_out],
                initializer=tf.constant_initializer(1.0),
                trainable=True)

        # Reduce over N, H, W for 4-D (conv) inputs; over N only for 2-D inputs.
        batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments') \
                if len(shape) == 4 else tf.nn.moments(input, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Update the moving averages before returning the batch statistics.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(is_train,
                mean_var_with_update,
                lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
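
A hypothetical usage sketch (the placeholder shapes and names are illustrative, not from the project): `is_train` must be a boolean tensor, since it feeds tf.cond, and a second call with the same scope and reuse=True shares beta/gamma with the first.

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
is_train = tf.placeholder(tf.bool, [])
y = batch_norm(x, is_train, scope='bn1')
y_shared = batch_norm(x, is_train, scope='bn1', reuse=True)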
Project: FractalNet    Author: tensorpro
def join(columns,
         coin):
  """Takes mean of the columns, applies drop path if
     `tflearn.get_training_mode()` is True.

  Args:
    columns: columns of fractal block.
    is_training: boolean in tensor form. Determines whether drop path
      should be used.
    coin: boolean in tensor form. Determines whether drop path is
     local or global.
  """
  if len(columns)==1:
    return columns[0]
  with tf.variable_op_scope(columns, None, "Join"):
    columns = tf.convert_to_tensor(columns)
    columns = tf.cond(tflearn.get_training_mode(),
                      lambda: drop_path(columns, coin),
                      lambda: columns)
    out = tf.reduce_mean(columns, 0)
  return out
Project: BinaryNet.tf    Author: itayhubara
def BinarizedSpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='BinarizedSpatialConvolution'):
    def b_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            bin_x = binarize(x)
            '''
            Note that we use binarized versions of the input and the weights. Since the binarize
            function uses the straight-through estimator (STE), we compute gradients with
            bin_x and bin_w but update w (the full-precision version).
            '''
            out = tf.nn.conv2d(bin_x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return b_conv2d
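
`binarize` itself is not shown in these snippets. A common straight-through-estimator implementation consistent with the comment above, given here as an assumption rather than the project's actual code, overrides the gradient of the Sign op with the identity:

def binarize(x):
    # Forward pass: sign(x) in {-1, +1}. Backward pass: the gradient flows
    # through unchanged, because 'Sign' gradients are rewritten to 'Identity'.
    g = tf.get_default_graph()
    with g.gradient_override_map({'Sign': 'Identity'}):
        return tf.sign(x)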
Project: BinaryNet.tf    Author: itayhubara
def BinarizedWeightOnlySpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='BinarizedWeightOnlySpatialConvolution'):
    '''
    This function is used only at the first layer of the model, as we don't want to binarize the RGB images.
    '''
    def bc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            out = tf.nn.conv2d(x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return bc_conv2d
Project: BinaryNet.tf    Author: itayhubara
def BinarizedAffine(nOutputPlane, bias=True, name=None, reuse=None):
    def b_affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            '''
            Note that we use binarized versions of the input (bin_x) and the weights (bin_w). Since the
            binarize function uses the straight-through estimator (STE), we compute gradients with bin_x and bin_w but update w (the full-precision version).
            '''
            bin_x = binarize(x)
            reshaped = tf.reshape(bin_x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return b_affineLayer
Project: Tensormodels    Author: asheshjain399 (the same repeat_op helper also appears verbatim in piecewisecrf, the-neural-perspective, darkskies-challenge, dcn.tf, tensorflow_web_deploy, inception_v3, Net2Net, and deeplearning-benchmark)
def repeat_op(repetitions, inputs, op, *args, **kwargs):
  """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

  Args:
    repetitions: number of repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    a tensor resulting from applying `op`, `repetitions` times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
    tower = inputs
    for _ in range(repetitions):
      tower = op(tower, *args, **kwargs)
    return tower
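
Echoing the docstring's example, a usage sketch (`ops.conv2d` and `images` are assumed names from the surrounding library, not defined here):

# Builds conv1/Conv, conv1/Conv_1 and conv1/Conv_2, each a 64-filter 3x3 conv.
net = repeat_op(3, images, ops.conv2d, 64, [3, 3], scope='conv1')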
Project: FractalNet    Author: tensorpro
def coin_flip(prob=.5):
  """Random boolean variable, with `prob` chance of being true.

  Used to choose between local and global drop path.

  Args:
    prob: float, probability of being True.
  """
  with tf.variable_op_scope([], None, "CoinFlip"):
    coin = tf.random_uniform([1])[0] > prob
  return coin
Project: FractalNet    Author: tensorpro
def drop_path(columns,
              coin):
  with tf.variable_op_scope([columns], None, "DropPath"):
    out = tf.cond(coin,
                  lambda: drop_some(columns),
                  lambda: random_column(columns))
  return out
Project: BinaryNet.tf    Author: itayhubara
def SpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, reuse=None, name='SpatialConvolution'):
    def conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
            out = tf.nn.conv2d(x, w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return conv2d
Project: BinaryNet.tf    Author: itayhubara
def Affine(nOutputPlane, bias=True, name=None, reuse=None):
    def affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
            output = tf.matmul(reshaped, w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return affineLayer
Project: BinaryNet.tf    Author: itayhubara
def Dropout(p, name='Dropout'):
    def dropout_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # is_training here is a Python bool; with a tensor flag this would be
            #   tf.cond(is_training, lambda: tf.nn.dropout(x, p), lambda: x)
            if is_training:
                return tf.nn.dropout(x, p)
            else:
                return x
    return dropout_layer
Project: BinaryNet.tf    Author: itayhubara
def ReLU(name='ReLU'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.relu(x)
    return layer
Project: BinaryNet.tf    Author: itayhubara
def HardTanh(name='HardTanh'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.clip_by_value(x,-1,1)
    return layer
Project: BinaryNet.tf    Author: itayhubara
def View(shape, name='View'):
    def view_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # Reshape the input to `shape` (e.g. flatten before an Affine layer).
            return tf.reshape(x, shape=shape)
    return view_layer
Project: BinaryNet.tf    Author: itayhubara
def SpatialMaxPooling(kW, kH=None, dW=None, dH=None, padding='VALID',
            name='SpatialMaxPooling'):
    kH = kH or kW
    dW = dW or kW
    dH = dH or kH
    def max_pool(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # NHWC ordering: ksize/strides are [1, height, width, 1].
            return tf.nn.max_pool(x, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding)
    return max_pool
Project: BinaryNet.tf    Author: itayhubara
def Sequential(moduleList):
    def model(x, is_training=True):
        # Chain the modules in order, recording every intermediate activation.
        output = x
        for i, m in enumerate(moduleList):
            output = m(output, is_training=is_training)
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, output)
        return output
    return model
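
These layer factories compose naturally. A hypothetical model assembled from the combinators defined in these snippets (`x` is an assumed input tensor):

model = Sequential([
    SpatialConvolution(64, 3, 3, padding='SAME'),
    ReLU(),
    SpatialMaxPooling(2),
    Affine(10),
])
logits = model(x, is_training=True)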
Project: BinaryNet.tf    Author: itayhubara
def Concat(moduleList, dim=3):
    def model(x, is_training=True):
        # Apply each module to the same input, then concatenate the results
        # along `dim` (tf.concat(dim, values) is the pre-TF-1.0 signature).
        outputs = []
        for i, m in enumerate(moduleList):
            name = 'layer_' + str(i)
            with tf.variable_op_scope([x], name, 'Layer'):
                outputs.append(m(x, is_training=is_training))
        return tf.concat(dim, outputs)
    return model
Project: BinaryNet.tf    Author: itayhubara
def Residual(moduleList, name='Residual'):
    m = Sequential(moduleList)
    def model(x, is_training=True):
        # Identity shortcut: output = m(x) + x.
        with tf.variable_op_scope([x], None, name):
            output = tf.add(m(x, is_training=is_training), x)
            return output
    return model
Project: icnn    Author: locuslab
def policy(obs, theta, name='policy'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
Project: icnn    Author: locuslab
def qfunction(obs, act, theta, name="qfunction"):
    with tf.variable_op_scope([obs, act], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h0a = tf.identity(act, name='h0-act')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat(1, [h1, act])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
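
In both networks, `theta` is assumed to be a flat list [W1, b1, W2, b2, W3, b3] of weights and biases for the three affine layers; a hypothetical constructor (names, sizes, and initializers are illustrative):

def make_theta(dim_in, l1, l2, dim_out, scope='theta'):
    # Six tensors: weights and biases for the three matmul layers above.
    with tf.variable_scope(scope):
        return [tf.get_variable('w1', [dim_in, l1]),
                tf.get_variable('b1', [l1], initializer=tf.constant_initializer(0.0)),
                tf.get_variable('w2', [l1, l2]),
                tf.get_variable('b2', [l2], initializer=tf.constant_initializer(0.0)),
                tf.get_variable('w3', [l2, dim_out]),
                tf.get_variable('b3', [dim_out], initializer=tf.constant_initializer(0.0))]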
Project: DDPG-tensorflow    Author: songrotek
def policy_network(state,theta,name='policy'):
  with tf.variable_op_scope([state], name, name):
    h0 = tf.identity(state, name='h0-state')
    h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
    h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
    h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
    action = tf.nn.tanh(h3, name='h4-action')
    return action
Project: DDPG-tensorflow    Author: songrotek
def q_network(state,action,theta, name="q_network"):
  with tf.variable_op_scope([state, action], name, name):
    h0 = tf.identity(state, name='h0-state')
    h0a = tf.identity(action, name='h0-act')
    h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
    h1a = tf.concat(1, [h1, action])
    h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
    qs = tf.matmul(h2, theta[4]) + theta[5]
    q = tf.squeeze(qs, [1], name='h3-q')

    return q
Project: FractalNet    Author: tensorpro
def fractal_template(inputs,
                     num_columns,
                     block_fn,
                     block_asc,
                     joined=True,
                     is_training=True,
                     reuse=False,
                     scope=None):
  """Template for making fractal blocks.

  Given a function and a corresponding arg_scope `fractal_template`
  will build a truncated fractal with `num_columns` columns.

  Args:
    inputs: a 4-D tensor  `[batch_size, height, width, channels]`.
    num_columns: integer, the columns in the fractal.
    block_fn: function to be called within each fractal.
    block_as: A function that returns argscope for `block_fn`.
    joined: boolean, whether the output columns should be joined.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    scope: Optional scope for `variable_scope`.
  """

  def fractal_expand(inputs, num_columns, joined):
    '''Recursive Helper Function for making fractal'''
    with block_asc():
      output = lambda cols: join(cols, coin) if joined else cols
      if num_columns == 1:
        return output([block_fn(inputs)])
      left = block_fn(inputs)
      right = fractal_expand(inputs, num_columns-1, joined=True)
      right = fractal_expand(right, num_columns-1, joined=False)
      cols = [left] + right
    return output(cols)

  with tf.variable_op_scope([inputs], scope, 'Fractal',
                            reuse=reuse) as scope:
    coin = coin_flip()
    net = fractal_expand(inputs, num_columns, joined)

  return net
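
A hypothetical invocation (`conv_block` and `conv_arg_scope` are assumed to come from the surrounding FractalNet code, which is not shown here):

net = fractal_template(images, num_columns=4,
                       block_fn=conv_block, block_asc=conv_arg_scope,
                       joined=True, is_training=True, scope='fractal1')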