Python tensorflow.contrib.layers module: dropout() example source code

The following 32 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.dropout().

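Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (illustrative only, not drawn from any project below): layers.dropout(inputs, keep_prob, is_training=...) keeps each activation with probability keep_prob at training time and acts as an identity op at inference time.

import tensorflow as tf
from tensorflow.contrib import layers

# Illustrative placeholders; any float tensor and a boolean training flag work the same way.
is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
x = tf.placeholder(tf.float32, shape=[None, 128], name='x')

net = layers.fully_connected(x, 64, activation_fn=tf.nn.relu)
# keep_prob=0.5: each unit survives with probability 0.5 while training;
# when is_training is False the layer passes its input through unchanged.
net = layers.dropout(net, keep_prob=0.5, is_training=is_training)
logits = layers.fully_connected(net, 10, activation_fn=None)
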
Project: zhusuan    Author: thu-ml    | project source | file source
def q_net(x, n_xl, n_z, n_particles, is_training):
    with zs.BayesianNet() as variational:
        normalizer_params = {'is_training': is_training,
                             'updates_collections': None}
        lz_x = tf.reshape(tf.to_float(x), [-1, n_xl, n_xl, 1])
        lz_x = layers.conv2d(
            lz_x, 32, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 64, kernel_size=5, stride=2,
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.conv2d(
            lz_x, 128, kernel_size=5, padding='VALID',
            normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        lz_x = layers.dropout(lz_x, keep_prob=0.9, is_training=is_training)
        lz_x = tf.reshape(lz_x, [-1, 128 * 3 * 3])
        lz_mean = layers.fully_connected(lz_x, n_z, activation_fn=None)
        lz_logstd = layers.fully_connected(lz_x, n_z, activation_fn=None)
        z = zs.Normal('z', lz_mean, logstd=lz_logstd, n_samples=n_particles,
                      group_ndims=1)
    return variational
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def doc2vec_prediction_model(input_vectors, input_gene, input_variation, output_label, batch_size,
                             is_training, embedding_size, output_classes):
    # inputs/outputs
    input_vectors = tf.reshape(input_vectors, [batch_size, embedding_size])
    input_gene = tf.reshape(input_gene, [batch_size, embedding_size])
    input_variation = tf.reshape(input_variation, [batch_size, embedding_size])
    targets = None
    if output_label is not None:
        output_label = tf.reshape(output_label, [batch_size, 1])
        targets = tf.one_hot(output_label, axis=-1, depth=output_classes, on_value=1.0,
                             off_value=0.0)
        targets = tf.squeeze(targets, axis=1)

    net = tf.concat([input_vectors, input_gene, input_variation], axis=1)
    net = layers.fully_connected(net, embedding_size * 2, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size // 4, activation_fn=tf.nn.relu)
    logits = layers.fully_connected(net, output_classes, activation_fn=None)

    return logits, targets
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def rnn(self, sequence, sequence_length, max_length, dropout, batch_size, training,
            num_hidden=TC_MODEL_HIDDEN, num_layers=TC_MODEL_LAYERS):
        # Recurrent network.
        cells = []
        for _ in range(num_layers):
            cell = tf.nn.rnn_cell.GRUCell(num_hidden)
            if training:
                cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=dropout)
            cells.append(cell)
        network = tf.nn.rnn_cell.MultiRNNCell(cells)
        type = sequence.dtype

        sequence_output, _ = tf.nn.dynamic_rnn(network, sequence, dtype=tf.float32,
                                               sequence_length=sequence_length,
                                               initial_state=network.zero_state(batch_size, type))
        # get last output of the dynamic_rnn
        sequence_output = tf.reshape(sequence_output, [batch_size * max_length, num_hidden])
        indexes = tf.range(batch_size) * max_length + (sequence_length - 1)
        output = tf.gather(sequence_output, indexes)
        return output
Project: gait-recognition    Author: marian-margeta    | project source | file source
def get_arg_scope(is_training):
        weight_decay_l2 = 0.1
        batch_norm_decay = 0.999
        batch_norm_epsilon = 0.0001

        with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                            weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                            biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                            weights_initializer = layers.variance_scaling_initializer(),
                            ):
            batch_norm_params = {
                'decay': batch_norm_decay,
                'epsilon': batch_norm_epsilon
            }
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training = is_training):
                with slim.arg_scope([slim.batch_norm],
                                    **batch_norm_params):
                    with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                        activation_fn = tf.nn.elu,
                                        normalizer_fn = slim.batch_norm,
                                        normalizer_params = batch_norm_params) as scope:
                        return scope
Project: googlenet    Author: da-steve101    | project source | file source
def aux_logit_layer( inputs, num_classes, is_training ):
    with tf.variable_scope("pool2d"):
        pooled = layers.avg_pool2d(inputs, [ 5, 5 ], stride = 3 )
    with tf.variable_scope("conv11"):
        conv11 = layers.conv2d( pooled, 128, [1, 1] )
    with tf.variable_scope("flatten"):
        flat = tf.reshape( conv11, [-1, 2048] )
    with tf.variable_scope("fc"):
        fc = layers.fully_connected( flat, 1024, activation_fn=None )
    with tf.variable_scope("drop"):
        drop = layers.dropout( fc, 0.3, is_training = is_training )
    with tf.variable_scope( "linear" ):
        linear = layers.fully_connected( drop, num_classes, activation_fn=None )
    with tf.variable_scope("soft"):
        soft = tf.nn.softmax( linear )
    return soft
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_output(net, endpoints, num_classes, dropout_keep_prob=0.5):
    with tf.variable_scope('Output'):
        net = layers.flatten(net, scope='Flatten')

        # 7 x 7 x 512
        net = layers.fully_connected(net, 4096, scope='Fc1')
        net = endpoints['Output/Fc1'] = layers.dropout(net, dropout_keep_prob, scope='Dropout1')

        # 1 x 1 x 4096
        net = layers.fully_connected(net, 4096, scope='Fc2')
        net = endpoints['Output/Fc2'] = layers.dropout(net, dropout_keep_prob, scope='Dropout2')

        logits = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # 1 x 1 x num_classes
        endpoints['Logits'] = logits
    return logits
Project: cancer    Author: yancz1989    | project source | file source
def model(H, x, training):
  net = dropout(x, 0.5, is_training = training)
  # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu)
  # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu)
  # net = max_pool2d(net, [2, 2], padding = 'VALID')
  # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu)
  # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu)
  # net = max_pool2d(net, [2, 2], padding = 'VALID')
  # ksize = net.get_shape().as_list()
  # net = max_pool2d(net, [ksize[1], ksize[2]])
  net = fully_connected(flatten(net), 256, activation_fn = tf.nn.relu)
  net = dropout(net, 0.5, is_training = training)
  logits = fully_connected(net, 1, activation_fn = tf.nn.sigmoid)
  preds = tf.cast(tf.greater(logits, 0.5), tf.int64)
  return logits, preds
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def model_fully_connected(self, output, gene, variation, num_output_classes, dropout, training):
        output = layers.dropout(output, keep_prob=dropout, is_training=training)
        net = tf.concat([output, gene, variation], axis=1)
        net = layers.fully_connected(net, 128, activation_fn=tf.nn.relu)
        net = layers.dropout(net, keep_prob=dropout, is_training=training)
        logits = layers.fully_connected(net, num_output_classes, activation_fn=None)
        return logits
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def _han(self, input_words, embeddings, gene, variation, batch_size, embeddings_size,
             num_hidden, dropout, word_output_size, sentence_output_size, training=True):

        input_words = tf.reshape(input_words, [batch_size, MAX_SENTENCES, MAX_WORDS_IN_SENTENCE])
        embedded_sequence, sentences_length, words_length = \
            self._embed_sequence_with_length(embeddings, input_words)
        _, sentence_size, word_size, _ = tf.unstack(tf.shape(embedded_sequence))

        # RNN word level
        with tf.variable_scope('word_level'):
            word_level_inputs = tf.reshape(embedded_sequence,
                                           [batch_size * sentence_size, word_size, embeddings_size])
            word_level_lengths = tf.reshape(words_length, [batch_size * sentence_size])

            word_level_output = self._bidirectional_rnn(word_level_inputs, word_level_lengths,
                                                        num_hidden)
            word_level_output = tf.reshape(word_level_output, [batch_size, sentence_size, word_size,
                                                               num_hidden * 2])
            word_level_output = self._attention(word_level_output, word_output_size, gene,
                                                variation)
            word_level_output = layers.dropout(word_level_output, keep_prob=dropout,
                                               is_training=training)
        # RNN sentence level
        with tf.variable_scope('sentence_level'):
            sentence_level_inputs = tf.reshape(word_level_output,
                                               [batch_size, sentence_size, word_output_size])
            sentence_level_output = self._bidirectional_rnn(sentence_level_inputs, sentences_length,
                                                            num_hidden)
            sentence_level_output = self._attention(sentence_level_output, sentence_output_size,
                                                    gene, variation)
            sentence_level_output = layers.dropout(sentence_level_output, keep_prob=dropout,
                                                   is_training=training)

        return sentence_level_output
Project: opinatt    Author: epochx    | project source | file source
def __call__(self, inputs, state, scope=None):
    if isinstance(self.state_size, tuple) != isinstance(self._zoneout_prob, tuple):
      raise TypeError("Subdivided states need subdivided zoneouts.")
    if isinstance(self.state_size, tuple) and len(tuple(self.state_size)) != len(tuple(self._zoneout_prob)):
      raise ValueError("State and zoneout need equally many parts.")
    output, new_state = self._cell(inputs, state, scope)
    if isinstance(self.state_size, tuple):
      if self.is_training:
        new_state = self._tuple([(1 - state_part_zoneout_prob) * dropout(
          new_state_part - state_part, (1 - state_part_zoneout_prob)) + state_part
                          for new_state_part, state_part, state_part_zoneout_prob in
                          zip(new_state, state, self._zoneout_prob)])
      else:
        new_state = self._tuple([state_part_zoneout_prob * state_part + (1 - state_part_zoneout_prob) * new_state_part
                          for new_state_part, state_part, state_part_zoneout_prob in
                          zip(new_state, state, self._zoneout_prob)])
    else:
      # Non-tuple state: apply zoneout with the scalar zoneout probability to the whole state.
      if self.is_training:
        new_state = (1 - self._zoneout_prob) * dropout(
          new_state - state, (1 - self._zoneout_prob)) + state
      else:
        new_state = self._zoneout_prob * state + (1 - self._zoneout_prob) * new_state
    return output, new_state

# # Wrap your cells like this
# cell = ZoneoutWrapper(tf.nn.rnn_cell.LSTMCell(hidden_units, initializer=random_uniform(), state_is_tuple=True),
# zoneout_prob=(z_prob_cells, z_prob_states))
Project: merlin    Author: CSTR-Edinburgh    | project source | file source
def __call__(self, inputs, state, scope=None):

        return tf.cond(self.is_training,\
                    lambda: DropoutWrapper(self._cell,self._input_keep_prob,self._output_keep_prob).__call__(inputs,state,scope=None),\
                    lambda: DropoutWrapper(self._cell,1.0,1.0).__call__(inputs,state,scope=None))
           #return self._cell(dropout(inputs,self._input_keep_prob,is_training=self.is_training,scope=None),state,scope=None)
Project: lsdc    Author: febert    | project source | file source
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label/target columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Project: lsdc    Author: febert    | project source | file source
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)
   #fc1 = tcl.dropout(fc1, 0.5)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   print 'input:',x
   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'fc1:',fc1
   print 'fc2:',fc2
   print 'END ENCODER\n'

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: Hands-On-Deep-Learning-with-TensorFlow    Author: PacktPublishing    | project source | file source
def conv_learn(X, y, mode):
    # Ensure our images are 2d 
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                kernel_size=[5, 5], 
                activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1], padding='VALID')

        # Need to flatten conv output for use in dense layer
    p1_size = np.product(
              [s.value for s in p1.get_shape()[1:]])
    p1f = tf.reshape(p1, [-1, p1_size ])

    # densely connected layer with 5 neurons and dropout
    h_fc1 = layers.fully_connected(p1f,
             5,
             activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5, is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)
    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op
# Use generic estimator with our function
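A hedged sketch of what that last comment points to (the Estimator wiring is not shown in the excerpt above; train_X, train_y, and test_X are placeholder names for image and integer-label arrays):

from tensorflow.contrib import learn

# conv_learn returns (predictions, loss, train_op), which the contrib.learn Estimator accepts as a model_fn.
classifier = learn.Estimator(model_fn=conv_learn)
classifier.fit(train_X, train_y, steps=1000)
predictions = list(classifier.predict(test_X))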
Project: Hands-On-Deep-Learning-with-TensorFlow    Author: PacktPublishing    | project source | file source
def conv_learn(X, y, mode):
    # Ensure our images are 2d 
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                kernel_size=[5, 5], 
                activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                strides=[1, 2, 2, 1], padding='VALID')

        # Need to flatten conv output for use in dense layer
    p1_size = np.product(
              [s.value for s in p1.get_shape()[1:]])
    p1f = tf.reshape(p1, [-1, p1_size ])

    # densely connected layer with 5 neurons and dropout
    h_fc1 = layers.fully_connected(p1f,
             5,
             activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5, is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)
    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op
# Use generic estimator with our function
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d], 
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _block_output(net, endpoints, num_classes=1000, dropout_keep_prob=0.5, scope='Output'):
    with tf.variable_scope(scope):
        # 8 x 8 x 1536
        shape = net.get_shape()
        net = layers.avg_pool2d(net, shape[1:3], padding='VALID', scope='Pool1_Global')
        endpoints['Output/Pool1'] = net
        # 1 x 1 x 1536
        net = layers.dropout(net, dropout_keep_prob)
        net = layers.flatten(net)
        # 1536
        net = layers.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
        # num classes
        endpoints['Logits'] = net
    return net
Project: Machine-Learning    Author: hadikazemi    | project source | file source
def discriminator(inputs, reuse=False):
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        net = lays.conv2d_transpose(inputs, 64, 3, stride=1, scope='conv1', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max1')
        net = lays.conv2d_transpose(net, 128, 3, stride=1, scope='conv2', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max2')
        net = lays.conv2d_transpose(net, 256, 3, stride=1, scope='conv3', padding='SAME', activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max3')
        net = tf.reshape(net, (batch_size, 4 * 4 * 256))
        net = lays.fully_connected(net, 128, scope='fc1', activation_fn=leaky_relu)
        net = lays.dropout(net, 0.5)
        net = lays.fully_connected(net, 1, scope='fc2', activation_fn=None)
        return net
Project: emoatt    Author: epochx    | project source | file source
def __call__(self, inputs, state, scope=None):
    if isinstance(self.state_size, tuple) != isinstance(self._zoneout_prob, tuple):
      raise TypeError("Subdivided states need subdivided zoneouts.")
    if isinstance(self.state_size, tuple) and len(tuple(self.state_size)) != len(tuple(self._zoneout_prob)):
      raise ValueError("State and zoneout need equally many parts.")
    output, new_state = self._cell(inputs, state, scope)
    if isinstance(self.state_size, tuple):
      if self.is_training:
        new_state = self._tuple([(1 - state_part_zoneout_prob) * dropout(
          new_state_part - state_part, (1 - state_part_zoneout_prob)) + state_part
                          for new_state_part, state_part, state_part_zoneout_prob in
                          zip(new_state, state, self._zoneout_prob)])
      else:
        new_state = self._tuple([state_part_zoneout_prob * state_part + (1 - state_part_zoneout_prob) * new_state_part
                          for new_state_part, state_part, state_part_zoneout_prob in
                          zip(new_state, state, self._zoneout_prob)])
    else:
      # Non-tuple state: apply zoneout with the scalar zoneout probability to the whole state.
      if self.is_training:
        new_state = (1 - self._zoneout_prob) * dropout(
          new_state - state, (1 - self._zoneout_prob)) + state
      else:
        new_state = self._zoneout_prob * state + (1 - self._zoneout_prob) * new_state
    return output, new_state

# # Wrap your cells like this
# cell = ZoneoutWrapper(tf.nn.rnn_cell.LSTMCell(hidden_units, initializer=random_uniform(), state_is_tuple=True),
# zoneout_prob=(z_prob_cells, z_prob_states))
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def model(self, input_text_begin, input_text_end, gene, variation, num_output_classes,
              batch_size, embeddings, training=True, dropout=TC_MODEL_DROPOUT):

        """
        Creates a model for text classification
        :param tf.Tensor input_text: the input data, the text as
        [batch_size, text_vector_max_length, embeddings_size]
        :param int num_output_classes: the number of output classes for the classifier
        :param int batch_size: batch size, the same used in the dataset
        :param List[List[float]] embeddings: a matrix with the embeddings for the embedding lookup
        :param int num_hidden: number of hidden GRU cells in every layer
        :param int num_layers: number of layers of the model
        :param float dropout: dropout value between layers
        :param boolean training: whether the model is built for training or not
        :return Dict[str,tf.Tensor]: a dict with logits and prediction tensors
        """
        input_text_begin = tf.reshape(input_text_begin, [batch_size, MAX_WORDS])
        if input_text_end is not None:
            input_text_end = tf.reshape(input_text_end, [batch_size, MAX_WORDS])
        embedded_sequence_begin, sequence_length_begin, \
        embedded_sequence_end, sequence_length_end, \
        gene, variation = \
            self.model_embedded_sequence(embeddings, input_text_begin, input_text_end, gene,
                                         variation)
        _, max_length, _ = tf.unstack(tf.shape(embedded_sequence_begin))

        with tf.variable_scope('text_begin'):
            output_begin = self.rnn(embedded_sequence_begin, sequence_length_begin, max_length,
                                    dropout, batch_size, training)
        if input_text_end is not None:
            with tf.variable_scope('text_end'):
                output_end = self.rnn(embedded_sequence_end, sequence_length_end, max_length,
                                      dropout, batch_size, training)
            output = tf.concat([output_begin, output_end], axis=1)
        else:
            output = output_begin

        # full connected layer
        logits = self.model_fully_connected(output, gene, variation, num_output_classes, dropout,
                                            training)

        prediction = tf.nn.softmax(logits)

        return {
            'logits'    : logits,
            'prediction': prediction,
            }
Project: gait-recognition    Author: marian-margeta    | project source | file source
def get_network(self, input_tensor, is_training, reuse = False):
        net = input_tensor

        with tf.variable_scope('GaitNN', reuse = reuse):
            with slim.arg_scope(self.get_arg_scope(is_training)):
                with tf.variable_scope('DownSampling'):
                    with tf.variable_scope('17x17'):
                        net = layers.convolution2d(net, num_outputs = 256, kernel_size = 1)
                        slim.repeat(net, 3, self.residual_block, ch = 256, ch_inner = 64)

                    with tf.variable_scope('8x8'):
                        net = self.residual_block(net, ch = 512, ch_inner = 64, stride = 2)
                        slim.repeat(net, 2, self.residual_block, ch = 512, ch_inner = 128)

                    with tf.variable_scope('4x4'):
                        net = self.residual_block(net, ch = 512, ch_inner = 128, stride = 2)
                        slim.repeat(net, 1, self.residual_block, ch = 512, ch_inner = 256)

                        net = layers.convolution2d(net, num_outputs = 256, kernel_size = 1)
                        net = layers.convolution2d(net, num_outputs = 256, kernel_size = 3)

                with tf.variable_scope('FullyConnected'):
                    # net = tf.reduce_mean(net, [1, 2], name = 'GlobalPool')
                    net = layers.flatten(net)
                    net = layers.fully_connected(net, 512, activation_fn = None, normalizer_fn = None)

                with tf.variable_scope('Recurrent', initializer = tf.contrib.layers.xavier_initializer()):
                    cell_type = {
                        'GRU': tf.nn.rnn_cell.GRUCell,
                        'LSTM': tf.nn.rnn_cell.LSTMCell
                    }

                    cell = cell_type[self.recurrent_unit](self.FEATURES)
                    cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.rnn_layers, state_is_tuple = True)

                    net = tf.expand_dims(net, 0)
                    net, state = tf.nn.dynamic_rnn(cell, net, initial_state = cell.zero_state(1, dtype = tf.float32))
                    net = tf.reshape(net, [-1, self.FEATURES])

                    # Temporal Avg-Pooling
                    gait_signature = tf.reduce_mean(net, 0)

                if is_training:
                    net = tf.expand_dims(gait_signature, 0)
                    net = layers.dropout(net, 0.7)

                    with tf.variable_scope('Logits'):
                        net = layers.fully_connected(net, self.num_of_persons, activation_fn = None,
                                                     normalizer_fn = None)

                return net, gait_signature, state
Project: hierarchical-attention-networks    Author: ematvey    | project source | file source
def _init_body(self, scope):
    with tf.variable_scope(scope):

      word_level_inputs = tf.reshape(self.inputs_embedded, [
        self.document_size * self.sentence_size,
        self.word_size,
        self.embedding_size
      ])
      word_level_lengths = tf.reshape(
        self.word_lengths, [self.document_size * self.sentence_size])

      with tf.variable_scope('word') as scope:
        word_encoder_output, _ = bidirectional_rnn(
          self.word_cell, self.word_cell,
          word_level_inputs, word_level_lengths,
          scope=scope)

        with tf.variable_scope('attention') as scope:
          word_level_output = task_specific_attention(
            word_encoder_output,
            self.word_output_size,
            scope=scope)

        with tf.variable_scope('dropout'):
          word_level_output = layers.dropout(
            word_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      # sentence_level

      sentence_inputs = tf.reshape(
        word_level_output, [self.document_size, self.sentence_size, self.word_output_size])

      with tf.variable_scope('sentence') as scope:
        sentence_encoder_output, _ = bidirectional_rnn(
          self.sentence_cell, self.sentence_cell, sentence_inputs, self.sentence_lengths, scope=scope)

        with tf.variable_scope('attention') as scope:
          sentence_level_output = task_specific_attention(
            sentence_encoder_output, self.sentence_output_size, scope=scope)

        with tf.variable_scope('dropout'):
          sentence_level_output = layers.dropout(
            sentence_level_output, keep_prob=self.dropout_keep_proba,
            is_training=self.is_training,
          )

      with tf.variable_scope('classifier'):
        self.logits = layers.fully_connected(
          sentence_level_output, self.classes, activation_fn=None)

        self.prediction = tf.argmax(self.logits, axis=-1)
Project: merlin    Author: CSTR-Edinburgh    | project source | file source
def define_feedforward_model(self):
      layer_list=[]
      with self.graph.as_default() as g:
          is_training_batch=tf.placeholder(tf.bool,shape=(),name="is_training_batch")
          bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
          g.add_to_collection("is_training_batch",is_training_batch)
          with tf.name_scope("input"):
              input_layer=tf.placeholder(dtype=tf.float32,shape=(None,self.n_in),name="input_layer")
              if self.dropout_rate!=0.0:
                 print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                 is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                 input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                 layer_list.append(input_layer_drop)
                 g.add_to_collection(name="is_training_drop",value=is_training_drop)
              else:
                 layer_list.append(input_layer)
          g.add_to_collection("input_layer",layer_list[0])
          for i in xrange(len(self.hidden_layer_size)):
              with tf.name_scope("hidden_layer_"+str(i+1)):
                if self.dropout_rate!=0.0:
                    last_layer=layer_list[-1]
                    if self.hidden_layer_type[i]=="tanh":
                       new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                  normalizer_params=bn_params)
                    if self.hidden_layer_type[i]=="sigmoid":
                        new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                  normalizer_params=bn_params)
                    new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                    layer_list.append(new_layer_drop)
                else:
                    last_layer=layer_list[-1]
                    if self.hidden_layer_type[i]=="tanh":
                       new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                    if self.hidden_layer_type[i]=="sigmoid":
                       new_layer=fully_connected(last_layer,self.hidden_layer_size[i],activation_fn=tf.nn.sigmoid,normalizer_fn=batch_norm,\
                                 normalizer_params=bn_params)
                    layer_list.append(new_layer)
          with tf.name_scope("output_layer"):
              if self.output_type=="linear":
                 output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=None)
              if self.output_type=="tanh":
                 output_layer=fully_connected(layer_list[-1],self.n_out,activation_fn=tf.nn.tanh)
              g.add_to_collection(name="output_layer",value=output_layer)
          with tf.name_scope("training_op"):
               if self.optimizer=="adam":
                  self.training_op=tf.train.AdamOptimizer()
Project: merlin    Author: CSTR-Edinburgh    | project source | file source
def define_sequence_model(self):
      seed=12345
      np.random.seed(12345)
      layer_list=[]
      with self.graph.as_default() as g:
          utt_length=tf.placeholder(tf.int32,shape=(None))
          g.add_to_collection(name="utt_length",value=utt_length)
          with tf.name_scope("input"):
               input_layer=tf.placeholder(dtype=tf.float32,shape=(None,None,self.n_in),name="input_layer")
               if self.dropout_rate!=0.0:
                  print "Using dropout to avoid overfitting and the dropout rate is",self.dropout_rate
                  is_training_drop=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_drop")
                  input_layer_drop=dropout(input_layer,self.dropout_rate,is_training=is_training_drop)
                  layer_list.append(input_layer_drop)
                  g.add_to_collection(name="is_training_drop",value=is_training_drop)
               else:
                  layer_list.append(input_layer)
          g.add_to_collection("input_layer",layer_list[0])
          with tf.name_scope("hidden_layer"):
             basic_cell=[]
             if "tanh" in self.hidden_layer_type:
                 is_training_batch=tf.placeholder(dtype=tf.bool,shape=(),name="is_training_batch")
                 bn_params={"is_training":is_training_batch,"decay":0.99,"updates_collections":None}
                 g.add_to_collection("is_training_batch",is_training_batch)
             for i in xrange(len(self.hidden_layer_type)):
                 if self.dropout_rate!=0.0:
                     if self.hidden_layer_type[i]=="tanh":
                         new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                         new_layer_drop=dropout(new_layer,self.dropout_rate,is_training=is_training_drop)
                         layer_list.append(new_layer_drop)
                     if self.hidden_layer_type[i]=="lstm":
                         basic_cell.append(MyDropoutWrapper(BasicLSTMCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                     if self.hidden_layer_type[i]=="gru":
                         basic_cell.append(MyDropoutWrapper(GRUCell(num_units=self.hidden_layer_size[i]),self.dropout_rate,self.dropout_rate,is_training=is_training_drop))
                 else:
                     if self.hidden_layer_type[i]=="tanh":
                        new_layer=fully_connected(layer_list[-1],self.hidden_layer_size[i],activation_fn=tf.nn.tanh,normalizer_fn=batch_norm,normalizer_params=bn_params)
                        layer_list.append(new_layer)
                     if self.hidden_layer_type[i]=="lstm":
                        basic_cell.append(LayerNormBasicLSTMCell(num_units=self.hidden_layer_size[i]))
                     if self.hidden_layer_type[i]=="gru":
                        basic_cell.append(LayerNormGRUCell(num_units=self.hidden_layer_size[i]))
             multi_cell=MultiRNNCell(basic_cell)
             rnn_outputs,rnn_states=tf.nn.dynamic_rnn(multi_cell,layer_list[-1],dtype=tf.float32,sequence_length=utt_length)
             layer_list.append(rnn_outputs)
          with tf.name_scope("output_layer"):
               if self.output_type=="linear" :
                   output_layer=tf.layers.dense(rnn_outputs,self.n_out)
                #  stacked_rnn_outputs=tf.reshape(rnn_outputs,[-1,self.n_out])
                #  stacked_outputs=tf.layers.dense(stacked_rnn_outputs,self.n_out)
                #  output_layer=tf.reshape(stacked_outputs,[-1,utt_length,self.n_out])
               g.add_to_collection(name="output_layer",value=output_layer)
          with tf.name_scope("training_op"):
               if self.optimizer=="adam":
                   self.training_op=tf.train.AdamOptimizer()
Project: lsdc    Author: febert    | project source | file source
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
        self._scope + "/input_from_feature_columns",
        values=features.values(),
        partitioner=input_layer_partitioner) as scope:
      net = layers.input_from_feature_columns(
          features,
          self._get_feature_columns(),
          weight_collections=[self._scope],
          scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_scope(
          self._scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._scope],
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        self._scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._scope],
          scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Project: lsdc    Author: febert    | project source | file source
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas,
            min_slice_size=64 << 20))
    with variable_scope.variable_scope(
        self._scope + "/input_from_feature_columns",
        values=features.values(),
        partitioner=input_layer_partitioner) as scope:
      net = layers.input_from_feature_columns(
          features,
          self._get_feature_columns(),
          weight_collections=[self._scope],
          scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_scope(
          self._scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._scope],
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        self._scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._scope],
          scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits
Project: ICGANs    Author: cameronfabbri    | project source | file source
def encZ(x, ACTIVATION):

   conv1 = tcl.conv2d(x, 32, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv1')
   conv1 = activate(conv1, ACTIVATION)

   conv2 = tcl.conv2d(conv1, 64, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv2')
   conv2 = activate(conv2, ACTIVATION)

   conv3 = tcl.conv2d(conv2, 128, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv3')
   conv3 = activate(conv3, ACTIVATION)

   conv4 = tcl.conv2d(conv3, 256, 5, 2, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='conv4')
   conv4 = activate(conv4, ACTIVATION)

   conv4_flat = tcl.flatten(conv4)

   fc1 = tcl.fully_connected(conv4_flat, 4096, activation_fn=tf.identity, normalizer_fn=tcl.batch_norm, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc1')
   fc1 = activate(fc1, ACTIVATION)
   fc1 = tcl.dropout(fc1, 0.5)

   fc2 = tcl.fully_connected(fc1, 100, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='fc2')

   # relu to match the [0,1] range from the distribution
   fc2 = relu(fc2)

   print 'input:',x
   print 'conv1:',conv1
   print 'conv2:',conv2
   print 'conv3:',conv3
   print 'conv4:',conv4
   print 'fc1:',fc1
   print 'fc2:',fc2
   print 'END ENCODER\n'

   tf.add_to_collection('vars', conv1)
   tf.add_to_collection('vars', conv2)
   tf.add_to_collection('vars', conv3)
   tf.add_to_collection('vars', conv4)
   tf.add_to_collection('vars', fc1)
   tf.add_to_collection('vars', fc2)

   return fc2
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
def _build_inception_v4(
        inputs,
        stack_counts=[4, 7, 3],
        dropout_keep_prob=0.8,
        num_classes=1000,
        is_training=True,
        scope=''):
    """Inception v4 from http://arxiv.org/abs/

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      dropout_keep_prob: dropout keep_prob.
      num_classes: number of predicted classes.
      is_training: whether is training or not.
      scope: Optional scope for op_scope.

    Returns:
      a list containing 'logits' Tensors and a dict of Endpoints.
    """
    # endpoints will collect relevant activations for external use, for example, summaries or losses.
    endpoints = {}
    name_scope_net = tf.name_scope(scope, 'Inception_v4', [inputs])
    arg_scope_train = arg_scope([layers.batch_norm, layers.dropout], is_training=is_training)
    arg_scope_conv = arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], stride=1, padding='SAME')
    with name_scope_net, arg_scope_train, arg_scope_conv:

        net = _block_stem(inputs, endpoints)
        # 35 x 35 x 384

        with tf.variable_scope('Scale1'):
            net = _stack(net, endpoints, fn=_block_a, count=stack_counts[0], scope='BlockA')
            # 35 x 35 x 384

        with tf.variable_scope('Scale2'):
            net = _block_a_reduce(net, endpoints)
            # 17 x 17 x 1024
            net = _stack(net, endpoints, fn=_block_b, count=stack_counts[1], scope='BlockB')
            # 17 x 17 x 1024

        with tf.variable_scope('Scale3'):
            net = _block_b_reduce(net, endpoints)
            # 8 x 8 x 1536
            net = _stack(net, endpoints, fn=_block_c, count=stack_counts[2], scope='BlockC')
            # 8 x 8 x 1536

        logits = _block_output(net, endpoints, num_classes, dropout_keep_prob, scope='Output')
        endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')

        return logits, endpoints
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use a FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def build_model(self, features, feature_columns, is_training):
    """See base class."""
    self._feature_columns = feature_columns

    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas, min_slice_size=64 << 20))
    with variable_scope.variable_scope(
        self._scope + "/input_from_feature_columns",
        values=features.values(),
        partitioner=input_layer_partitioner) as scope:
      net = layers.input_from_feature_columns(
          features,
          self._get_feature_columns(),
          weight_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)

    hidden_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(self._hidden_units):
      with variable_scope.variable_scope(
          self._scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=self._activation_fn,
            variables_collections=[self._scope],
            trainable=self._trainable,
            scope=scope)
        if self._dropout is not None and is_training:
          net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
      self._add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        self._scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      logits = layers.fully_connected(
          net,
          self._num_label_columns,
          activation_fn=None,
          variables_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)
    self._add_hidden_layer_summary(logits, "logits")
    return logits