Python tensorflow module: norm() code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.norm().
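Before the examples, a minimal sketch of the tf.norm() API itself (TensorFlow 1.x signature; the tensors and values below are illustrative):

import tensorflow as tf

x = tf.constant([[3.0, 4.0], [6.0, 8.0]])
frob = tf.norm(x)                                 # Frobenius norm of the full tensor: sqrt(125)
row_l2 = tf.norm(x, axis=1)                       # per-row L2 norms: [5.0, 10.0]
row_l1 = tf.norm(x, ord=1, axis=1)                # per-row L1 norms: [7.0, 14.0]
row_l2_kept = tf.norm(x, axis=1, keep_dims=True)  # shape [2, 1]; handy for x / ||x||
with tf.Session() as sess:
    print(sess.run([frob, row_l2, row_l1]))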

Project: tensorflow-adversarial    Author: gongzhitaao
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
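A hedged usage sketch of the helper above; the toy linear "model", input, and hyperparameter values are illustrative assumptions, not taken from the project:

# Illustrative only: a fixed linear binary classifier with a sigmoid output.
w = tf.constant([[0.5], [-0.25]])

def model(t):
    return tf.sigmoid(tf.matmul(t, w))

x = tf.constant([[1.0, 2.0]])
noise = _deepfool2(model, x, epochs=3, eta=0.01,
                   clip_min=-1.0, clip_max=1.0, min_prob=0.0)
xadv = tf.clip_by_value(x + noise * (1 + 0.01), -1.0, 1.0)  # adversarial example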
Project: conv_seq2seq    Author: tobyyouup
def linear_mapping_weightnorm(inputs, out_dim, in_dim=None, dropout=1.0, var_scope_name="linear_mapping"):
  with tf.variable_scope(var_scope_name):
    input_shape = inputs.get_shape().as_list()    # static shape; may contain None
    input_shape_tensor = tf.shape(inputs)    
    # use weight normalization (Salimans & Kingma, 2016): w = g * v / ||v||_2
    V = tf.get_variable('V', shape=[int(input_shape[-1]), out_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(dropout*1.0/int(input_shape[-1]))), trainable=True)
    V_norm = tf.norm(V.initialized_value(), axis=0)  # V shape is M*N,  V_norm shape is N
    g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm, trainable=True)
    b = tf.get_variable('b', shape=[out_dim], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)   # weightnorm bias is init zero

    assert len(input_shape) == 3
    inputs = tf.reshape(inputs, [-1, input_shape[-1]])
    inputs = tf.matmul(inputs, V)
    inputs = tf.reshape(inputs, [input_shape_tensor[0], -1, out_dim])
    #inputs = tf.matmul(inputs, V)    # x*v

    scaler = tf.div(g, tf.norm(V, axis=0))   # g / ||V||_2 (per output column)
    inputs = tf.reshape(scaler,[1, out_dim])*inputs + tf.reshape(b,[1, out_dim])   # (x*V) * g/||V||_2 + b


    return inputs
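In equation form, the reparameterization above is weight normalization (Salimans & Kingma, 2016):

$$ w = g \cdot \frac{v}{\lVert v \rVert_2} $$

The direction of each output column is carried by V, while its scale is the separately trained g; initializing g to ||V||_2 means the layer starts out as the plain linear map x·V.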
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.nn.dropout(tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1'),keep_prob=self.keep_prob)
            #logits1 =  tf.contrib.layers.fully_connected(image_fc1, self.num_class, weights_regularizer=wd, scope='i_fc1_softmax')
            logits  = tf.contrib.layers.fully_connected(image_fc1, self.num_class,activation_fn=None, weights_regularizer=wd, scope='i_fc2_softmax')
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        #self.endpoint['logits1'] = logits1
        self.endpoint['logits'] = logits
        return embed,logits
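A side note on the normalization idiom used here and in the sibling functions below: dividing by tf.norm(..., keep_dims=True) is, apart from the built-in epsilon guard against zero vectors, equivalent to the helper (a one-line sketch):

embed = tf.nn.l2_normalize(image_fc2_bn, dim=-1)  # same unit-length rows, with a built-in epsilon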
Project: image-text-matching    Author: llltttppp
def sentence_concat(self, tfidf, lda, reuse=False):
    with tf.variable_scope('sentence_concat', reuse=reuse) as scope:
        wd = tf.contrib.layers.l2_regularizer(self.weight_decay)

        tfidf_fc1 = tf.contrib.layers.fully_connected(tfidf, 2048, weights_regularizer=wd, scope='tfidf_fc1')   
        lda_fc1 = tf.contrib.layers.fully_connected(lda, 64, scope='lda_fc1')
        feat_concat = tf.concat([tfidf_fc1, lda_fc1], axis=1)
        #drop_fc1 = tf.nn.dropout(feat_concat, self.keep_prob, name='drop_fc1')
        sentence_fc2 = tf.contrib.layers.fully_connected(feat_concat, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
        sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                           reuse=reuse, decay=0.999, updates_collections=None, 
                                                           scope='s_fc2_bn')    
        embed = sentence_fc2_bn/tf.norm(sentence_fc2_bn, axis= -1, keep_dims=True)

    self.endpoint['tfidf_fc1'] = tfidf_fc1
    self.endpoint['lda_fc1'] = lda_fc1  
    self.endpoint['concat_embed'] = embed
    return embed
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=300)
            lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,input_keep_prob=self.keep_prob,output_keep_prob=self.keep_prob)
            zero_state = lstm_cell.zero_state(
                batch_size=self.sentence_emb.get_shape()[0], dtype=tf.float32)

            input_list = tf.unstack(self.sentence_emb,axis=1)
            output,_ = tf.contrib.rnn.static_rnn(lstm_cell, inputs=input_list,initial_state=zero_state)
            lstm_output = tf.concat(output[:100:1],axis=1)
            sentence_fc1 =tf.contrib.layers.fully_connected(lstm_output,2048, \
                                                            weights_regularizer=wd, scope='s_fc1') # 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_lstm'] = lstm_output
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False,skip=False):
        if skip:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc2 = tf.contrib.layers.fully_connected(self.cluster_feature, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/(tf.norm(sentence_fc2,axis= -1,keep_dims=True)+1e-5)
            self.endpoint['sentence_fc2'] = sentence_fc2
            self.endpoint['cluster'] =self.cluster_feature
            return sentence_fc2
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.nn.dropout(tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1'),keep_prob=self.keep_prob )# 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(tf.concat([sentence_fc1,self.cluster_feature],axis=1), 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: RFHO    Author: lucfra
def norms_of_d_dynamics_d_hypers(fd=None):
        """
        In `ForwardHG`, records the norms of the partial derivatives of the dynamics w.r.t. the hyperparameters.

        :param fd: optional filter `(step, results) -> results`; defaults to the identity.
        :return: a callable that performs the recording.
        """
        if fd is None: fd = lambda stp, rs: rs

        def _call(*args, **kwargs):
            hg = args[0]
            if isinstance(hg, rf.HyperOptimizer):
                hg = hg.hyper_gradients  # guess most common case
            assert isinstance(hg, rf.ForwardHG)
            _rs = Records.tensors(*hg.d_dynamics_d_hypers, op=tf.norm,
                                  fd=fd,
                                  condition=lambda stp, rs: rs != 'INIT')(args, kwargs)
            return _rs

        return _call
Project: spykes    Author: KordingLab
def sparse_filtering_loss(_, y_pred):
    '''Defines the sparse filtering loss function.

    Args:
        y_true (tensor): The ground truth tensor (not used, since this is an
            unsupervised learning algorithm).
        y_pred (tensor): Tensor representing the feature vector at a
            particular layer.

    Returns:
        scalar tensor: The sparse filtering loss.
    '''
    y = tf.reshape(y_pred, tf.stack([-1, tf.reduce_prod(y_pred.shape[1:])]))
    l2_normed = tf.nn.l2_normalize(y, dim=1)
    l1_norm = tf.norm(l2_normed, ord=1, axis=1)
    return tf.reduce_sum(l1_norm)
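Because the loss ignores its first argument, it plugs straight into a Keras-style training loop; a minimal sketch, where the toy model, random data, and dummy targets are illustrative assumptions:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(32, input_shape=(16,), activation='relu')])
model.compile(optimizer='adam', loss=sparse_filtering_loss)
X = np.random.rand(128, 16).astype('float32')
model.fit(X, np.zeros((128, 32), dtype='float32'), epochs=1)  # dummy targets; y_true is unused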
Project: TensorflowFramework    Author: vahidk
def wgan_loss(x, gz, discriminator, beta=10.0):
  """Improved Wasserstein GAN loss.

  Args:
    x: Batch of real samples.
    gz: Batch of generated samples.
    discriminator: Discriminator function.
    beta: Regularizer factor.
  Returns:
    d_loss: Discriminator loss.
    g_loss: Generator loss.
  """
  dx = discriminator(x)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dgz = discriminator(gz)
  batch_size = tf.shape(x)[0]
  alpha = tf.random_uniform([batch_size])  # NOTE: typically reshaped so it broadcasts per sample, e.g. [batch_size, 1, ..., 1]
  xhat = x * alpha + gz * (1 - alpha)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dxhat = discriminator(xhat)
  gnorm = tf.norm(tf.gradients(dxhat, xhat)[0])
  d_loss = -tf.reduce_mean(dx - dgz - beta * tf.square(gnorm - 1))
  g_loss = -tf.reduce_mean(dgz)
  return d_loss, g_loss
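The discriminator objective minimized above is the standard gradient-penalty formulation (Gulrajani et al., 2017):

$$ L_D = \mathbb{E}[D(G(z))] - \mathbb{E}[D(x)] + \beta\,\mathbb{E}_{\hat{x}}\big[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2\big], \qquad \hat{x} = \alpha x + (1-\alpha)\,G(z),\ \alpha \sim U[0,1] $$

One caveat worth noting: tf.norm here is taken over the entire batch gradient at once, whereas the GAN-general examples further below flatten per sample and take tf.norm(..., axis=1), which matches the per-sample expectation in the formula.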
Project: Sohu-LuckData-Image-Text-Matching-Competition    Author: WeitaoVan
def sentence_concat(self, tfidf, lda, reuse=False):
        with tf.variable_scope('sentence_concat', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)

            tfidf_fc1 = tf.contrib.layers.fully_connected(tfidf, 2048, weights_regularizer=wd, scope='tfidf_fc1')   
            lda_fc1 = tf.contrib.layers.fully_connected(lda, 64, scope='lda_fc1')
            feat_concat = tf.concat([tfidf_fc1, lda_fc1], axis=1)
            #drop_fc1 = tf.nn.dropout(feat_concat, self.keep_prob, name='drop_fc1')
            sentence_fc2 = tf.contrib.layers.fully_connected(feat_concat, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                           reuse=reuse, decay=0.999, updates_collections=None, 
                                                                   scope='s_fc2_bn')    
            embed = sentence_fc2_bn/tf.norm(sentence_fc2_bn, axis= -1, keep_dims=True)

        self.endpoint['tfidf_fc1'] = tfidf_fc1
        self.endpoint['lda_fc1'] = lda_fc1  
        self.endpoint['concat_embed'] = embed
        return embed
Project: Sohu-LuckData-Image-Text-Matching-Competition    Author: WeitaoVan
def sentence_concat(self, tfidf, lda, reuse=False):
        with tf.variable_scope('sentence_concat', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)

            tfidf_fc1 = tf.contrib.layers.fully_connected(tfidf, 2048, weights_regularizer=wd, scope='tfidf_fc1')   
            lda_fc1 = tf.contrib.layers.fully_connected(lda, 64, scope='lda_fc1')
            feat_concat = tf.concat([tfidf_fc1, lda_fc1], axis=1)
            #drop_fc1 = tf.nn.dropout(feat_concat, self.keep_prob, name='drop_fc1')
            sentence_fc2 = tf.contrib.layers.fully_connected(feat_concat, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                           reuse=reuse, decay=0.999, updates_collections=None, 
                                                                   scope='s_fc2_bn')    
            embed = sentence_fc2_bn/tf.norm(sentence_fc2_bn, axis= -1, keep_dims=True)

        self.endpoint['tfidf_fc1'] = tfidf_fc1
        self.endpoint['lda_fc1'] = lda_fc1  
        self.endpoint['concat_embed'] = embed
        return embed
Project: tefla    Author: openAGI
def _numerically_stable_global_norm(tensor_list):
    """Compute the global norm of a list of Tensors, with improved stability.

    The global norm computation sometimes overflows due to the intermediate L2
    step. To avoid this, we divide by a cheap-to-compute max over the
    matrix elements.

    Args:
      tensor_list: A list of tensors, or `None`.

    Returns:
      A scalar tensor with the global norm.
    """
    if np.all([x is None for x in tensor_list]):
        return 0.0

    list_max = tf.reduce_max([tf.reduce_max(tf.abs(x)) for x in
                              tensor_list if x is not None])
    return list_max * tf.global_norm([x / list_max for x in tensor_list
                                      if x is not None])
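The trick rests on the scale invariance of norms: for any m > 0,

$$ \lVert x \rVert_2 = m \cdot \lVert x / m \rVert_2 $$

so dividing every tensor by m = max|x_i| keeps the intermediate squared sums near unit scale, and the final multiplication by m restores the true global norm.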
Project: ml_gans    Author: imironhead
def repelling_regularizer(bottleneck):
    """
    Pulling-away (repelling) regularizer.

    bottleneck:
        the bottleneck layer in the autoencoder.
    """
    s = tf.contrib.layers.flatten(bottleneck)
    n = tf.cast(tf.shape(s)[0], tf.float32)

    sxst = tf.matmul(s, s, transpose_b=True)

    sn = tf.norm(s, 1, axis=1, keep_dims=True)

    snxsnt = tf.matmul(sn, sn, transpose_b=True)

    total = tf.square(sxst / snxsnt)
    total = tf.reduce_sum(total)

    return 0.1 * (total - n) / (n * (n - 1.0))
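This is the pulling-away term (PT) from EBGAN (Zhao et al., 2016), which pushes the autoencoder's latent codes toward pairwise orthogonality:

$$ f_{PT}(S) = \frac{1}{N(N-1)} \sum_{i} \sum_{j \ne i} \left( \frac{S_i^\top S_j}{\lVert S_i \rVert\, \lVert S_j \rVert} \right)^2 $$

Note that tf.norm(s, 1, ...) passes ord=1 positionally, so this implementation divides by L1 rather than the L2 norms of the formula; with L2 norms the diagonal terms are exactly 1, which is what the "- n" correction assumes.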
Project: GAN-general    Author: weilinie
def cal_grad_penalty(self, real_data, fake_data):
        # WGAN lipschitz-penalty
        epsilon = tf.random_uniform(shape=[self.batch_size, 1, 1], minval=0., maxval=1.)

        data_diff = fake_data - real_data
        interp_data = real_data + epsilon * data_diff
        disc_interp, _ = discriminator(
            self.d_net, interp_data, self.conv_hidden_num,
            self.normalize_d
        )
        grad_interp = tf.gradients(disc_interp, [interp_data])[0]
        print('The shape of grad_interp: {}'.format(grad_interp.get_shape().as_list()))
        grad_interp_flat = tf.reshape(grad_interp, [self.batch_size, -1])
        slope = tf.norm(grad_interp_flat, axis=1)
        print('The shape of slope: {}'.format(slope.get_shape().as_list()))

        grad_penalty = tf.reduce_mean((slope - 1.) ** 2)
        return grad_penalty
Project: GAN-general    Author: weilinie
def cal_one_side_grad_penalty(self, real_data, fake_data):
        # WGAN lipschitz-penalty
        epsilon = tf.random_uniform(shape=[self.batch_size, 1, 1], minval=0., maxval=1.)

        data_diff = fake_data - real_data
        interp_data = real_data + epsilon * data_diff
        disc_interp, _ = discriminator(
            self.d_net, interp_data, self.conv_hidden_num,
            self.normalize_d
        )
        grad_interp = tf.gradients(disc_interp, [interp_data])[0]
        print('The shape of grad_interp: {}'.format(grad_interp.get_shape().as_list()))
        grad_interp_flat = tf.reshape(grad_interp, [self.batch_size, -1])
        slope = tf.norm(grad_interp_flat, axis=1)
        print('The shape of slope: {}'.format(slope.get_shape().as_list()))

        grad_penalty = tf.reduce_mean(tf.nn.relu(slope - 1.) ** 2)
        return grad_penalty
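The only difference from the two-sided penalty above is that slopes below 1 go unpenalized:

$$ L_{GP}^{\text{one-sided}} = \mathbb{E}_{\hat{x}}\big[\max\big(0,\ \lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\big)^2\big] $$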
Project: GAN-general    Author: weilinie
def cal_real_nearby_grad_penalty(self, real_data):
        # WGAN lipschitz-penalty
        epsilon = tf.random_uniform(shape=[self.batch_size, 1, 1], minval=0., maxval=1.)

        data_diff = get_perturbed_batch(real_data) - real_data
        interp_data = real_data + epsilon * data_diff
        disc_real_nearby, _ = discriminator(
            self.d_net, interp_data, self.conv_hidden_num,
            self.normalize_d
        )
        grad_real_nearby = tf.gradients(disc_real_nearby, [interp_data])[0]
        print('The shape of grad_real_nearby: {}'.format(grad_real_nearby.get_shape().as_list()))
        grad_real_nearby_flat = tf.reshape(grad_real_nearby, [self.batch_size, -1])
        slope = tf.norm(grad_real_nearby_flat, axis=1)
        print('The shape of slope: {}'.format(slope.get_shape().as_list()))

        grad_penalty = tf.reduce_mean((slope - 1.) ** 2)
        return grad_penalty
Project: LifelongVAE    Author: jramapuram
def _create_optimizer(self, tvars, cost, lr):
        # optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        print('there are %d trainable vars in cost %s\n' % (len(tvars), cost.name))
        grads = tf.gradients(cost, tvars)

        # DEBUG: exploding gradients test with this:
        # for index in range(len(grads)):
        #     if grads[index] is not None:
        #         gradstr = "\n grad [%i] | tvar [%s] =" % (index, tvars[index].name)
        #         grads[index] = tf.Print(grads[index], [grads[index]], gradstr, summarize=100)

        # grads, _ = tf.clip_by_global_norm(grads, 5.0)
        self.grad_norm = tf.norm(tf.concat([tf.reshape(t, [-1]) for t in grads],
                                           axis=0))
        return optimizer.apply_gradients(zip(grads, tvars))
        # return tf.train.AdamOptimizer(learning_rate=lr).minimize(cost, var_list=tvars)
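Concatenating the flattened gradients and taking a single L2 norm, as above, is numerically identical to the global norm, so the same scalar is available through the built-in (a one-line equivalent, assuming no None entries in grads):

self.grad_norm = tf.global_norm(grads)  # sqrt of the summed squared norms, same value as the concat version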
Project: LifelongVAE    Author: jramapuram
def _create_optimizer(self, tvars, cost, lr):
        # optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        print('there are %d trainable vars in cost %s\n' % (len(tvars), cost.name))
        grads = tf.gradients(cost, tvars)

        # DEBUG: exploding gradients test with this:
        # for index in range(len(grads)):
        #     if grads[index] is not None:
        #         gradstr = "\n grad [%i] | tvar [%s] =" % (index, tvars[index].name)
        #         grads[index] = tf.Print(grads[index], [grads[index]], gradstr, summarize=100)

        # grads, _ = tf.clip_by_global_norm(grads, 5.0)
        self.grad_norm = tf.norm(tf.concat([tf.reshape(t, [-1]) for t in grads],
                                           axis=0))
        return optimizer.apply_gradients(zip(grads, tvars))
        # return tf.train.AdamOptimizer(learning_rate=lr).minimize(cost, var_list=tvars)
Project: recurrent-entity-networks    Author: jimfleming
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
            U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block],
                                initializer=self._recurrent_initializer)
            V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block],
                                initializer=self._recurrent_initializer)
            W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block],
                                initializer=self._recurrent_initializer)

            U_bias = tf.get_variable('U_bias', [self._num_units_per_block])

            # Split the hidden state into blocks (each U, V, W are shared across blocks).
            state = tf.split(state, self._num_blocks, axis=1)

            next_states = []
            for j, state_j in enumerate(state): # Hidden State (j)
                key_j = tf.expand_dims(self._keys[j], axis=0)
                gate_j = self.get_gate(state_j, key_j, inputs)
                candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, U_bias)

                # Equation 4: h_j <- h_j + g_j * h_j^~
                # Perform an update of the hidden state (memory).
                state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

                # Equation 5: h_j <- h_j / \norm{h_j}
                # Forget previous memories by normalization.
                state_j_next_norm = tf.norm(
                    tensor=state_j_next,
                    ord='euclidean',
                    axis=-1,
                    keep_dims=True)
                state_j_next_norm = tf.where(
                    tf.greater(state_j_next_norm, 0.0),
                    state_j_next_norm,
                    tf.ones_like(state_j_next_norm))
                state_j_next = state_j_next / state_j_next_norm

                next_states.append(state_j_next)
            state_next = tf.concat(next_states, axis=1)
        return state_next, state_next
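The two update steps implement equations (4) and (5) of the Recurrent Entity Network (Henaff et al., 2017):

$$ h_j \leftarrow h_j + g_j \odot \tilde{h}_j, \qquad h_j \leftarrow \frac{h_j}{\lVert h_j \rVert_2} $$

with the tf.where guard substituting a norm of 1 for all-zero blocks so the division never produces NaNs.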
Project: ISLES2017    Author: MiguelMonteiro
def dice_coefficient(volume_1, volume_2):
    with tf.variable_scope('calc_dice_coefficient'):
        intersection = tf.reduce_sum(volume_1 * volume_2)
        size_i1 = tf.norm(volume_1, ord=1)
        size_i2 = tf.norm(volume_2, ord=1)
        return 2 * intersection / (size_i1 + size_i2)
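For non-negative volumes the ord=1 norms are just element sums, so this computes the familiar Dice coefficient:

$$ \mathrm{Dice}(A, B) = \frac{2\,|A \cap B|}{|A| + |B|} = \frac{2 \sum_i a_i b_i}{\sum_i a_i + \sum_i b_i} $$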
Project: ISLES2017    Author: MiguelMonteiro
def soft_dice_loss(logits, ground_truth):
    probabilities = tf.sigmoid(logits)
    interception_volume = tf.reduce_sum(probabilities * ground_truth)
    return - 2 * interception_volume / (tf.norm(ground_truth, ord=1) + tf.norm(probabilities, ord=1))
Project: HyperGAN    Author: 255BITS
def f(self, net, dx, dg):
        # Note: this is currently not working that well; we might need a second sample of X.

        return tf.norm(net - dg, axis=1) - tf.norm(dx, axis=1)
Project: DeepVideo    Author: AniketBajpai
def build_summary(self):
        # Distribution of encoder activations
        tf.summary.histogram('encoder/conv1_outputs', self.net['conv1_outputs'])
        tf.summary.histogram('encoder/conv2_outputs', self.net['conv2_outputs'])
        tf.summary.histogram('encoder/conv3_outputs', self.net['conv3_outputs'])

        # Encoder weights, biases
        tf.summary.scalar('encoder/w1', tf.norm(self.net['w1']))
        tf.summary.scalar('encoder/w2', tf.norm(self.net['w2']))
        tf.summary.scalar('encoder/w3', tf.norm(self.net['w3']))

        tf.summary.scalar('encoder/b1', tf.norm(self.net['b1']))
        tf.summary.scalar('encoder/b2', tf.norm(self.net['b2']))
        tf.summary.scalar('encoder/b3', tf.norm(self.net['b3']))
Project: DeepVideo    Author: AniketBajpai
def build_summary(self, name):
        # Distribution of generator activations
        tf.summary.histogram('generator/{}/f_deconv2_outputs'.format(name), self.net['f_deconv2_outputs'])
        tf.summary.histogram('generator/{}/f_deconv3_outputs'.format(name), self.net['f_deconv3_outputs'])
        tf.summary.histogram('generator/{}/f_deconv4_outputs'.format(name), self.net['f_deconv4_outputs'])
        tf.summary.histogram('generator/{}/f_deconv5i_outputs'.format(name), self.net['f_deconv5i_outputs'])
        tf.summary.histogram('generator/{}/f_deconv5m_outputs'.format(name), self.net['f_deconv5m_outputs'])
        tf.summary.histogram('generator/{}/b_deconv2_outputs'.format(name), self.net['b_deconv2_outputs'])
        tf.summary.histogram('generator/{}/b_deconv3_outputs'.format(name), self.net['b_deconv3_outputs'])
        tf.summary.histogram('generator/{}/b_deconv4_outputs'.format(name), self.net['b_deconv4_outputs'])
        tf.summary.histogram('generator/{}/b_deconv5_outputs'.format(name), self.net['b_deconv5_outputs'])

        # Generator weights, biases
        tf.summary.scalar('generator/{}/w2_f'.format(name), tf.norm(self.net['w2_f']))
        tf.summary.scalar('generator/{}/w3_f'.format(name), tf.norm(self.net['w3_f']))
        tf.summary.scalar('generator/{}/w4_f'.format(name), tf.norm(self.net['w4_f']))
        tf.summary.scalar('generator/{}/w5_fi'.format(name), tf.norm(self.net['w5_fi']))
        tf.summary.scalar('generator/{}/w5_fm'.format(name), tf.norm(self.net['w5_fm']))
        tf.summary.scalar('generator/{}/w2_b'.format(name), tf.norm(self.net['w2_b']))
        tf.summary.scalar('generator/{}/w3_b'.format(name), tf.norm(self.net['w3_b']))
        tf.summary.scalar('generator/{}/w4_b'.format(name), tf.norm(self.net['w4_b']))
        tf.summary.scalar('generator/{}/w5_b'.format(name), tf.norm(self.net['w5_b']))

        tf.summary.scalar('generator/{}/b2_f'.format(name), tf.norm(self.net['b2_f']))
        tf.summary.scalar('generator/{}/b3_f'.format(name), tf.norm(self.net['b3_f']))
        tf.summary.scalar('generator/{}/b4_f'.format(name), tf.norm(self.net['b4_f']))
        tf.summary.scalar('generator/{}/b5_fi'.format(name), tf.norm(self.net['b5_fi']))
        tf.summary.scalar('generator/{}/b5_fm'.format(name), tf.norm(self.net['b5_fm']))
        tf.summary.scalar('generator/{}/b2_b'.format(name), tf.norm(self.net['b2_b']))
        tf.summary.scalar('generator/{}/b3_b'.format(name), tf.norm(self.net['b3_b']))
        tf.summary.scalar('generator/{}/b4_b'.format(name), tf.norm(self.net['b4_b']))
        tf.summary.scalar('generator/{}/b5_b'.format(name), tf.norm(self.net['b5_b']))
Project: QDREN    Author: andreamad8
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
            # Split the hidden state into blocks (each U, V, W are shared across blocks).

            U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block])
            V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block])
            W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block])

            b = tf.get_variable('biasU',[self._num_units_per_block])

            state = tf.split(state, self._num_blocks, 1)
            next_states = []
            for j, state_j in enumerate(state): # Hidden State (j)
                key_j = self._keys[j]
                gate_j = self.get_gate(state_j, key_j, inputs)
                candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, b)

                # Equation 4: h_j <- h_j + g_j * h_j^~
                # Perform an update of the hidden state (memory).
                state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

                # # Forget previous memories by normalization.
                state_j_next = tf.nn.l2_normalize(state_j_next, -1) # TODO: Is epsilon necessary?

                # Equation 5: h_j <- h_j / \norm{h_j}
                # Forget previous memories by normalization.
                # state_j_next_norm = tf.norm(tensor=state_j_next,
                #                             ord='euclidean',
                #                             axis=-1,
                #                             keep_dims=True)
                # state_j_next_norm = tf.where(
                #     tf.greater(state_j_next_norm, 0.0),
                #     state_j_next_norm,
                #     tf.ones_like(state_j_next_norm))
                # state_j_next = state_j_next / state_j_next_norm


                next_states.append(state_j_next)
            state_next = tf.concat(next_states, 1)
        return state_next, state_next
Project: QDREN    Author: andreamad8
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
            # Split the hidden state into blocks (each U, V, W are shared across blocks).

            U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block])
            V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block])
            W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block])

            b = tf.get_variable('biasU',[self._num_units_per_block])

            state = tf.split(state, self._num_blocks, 1)
            next_states = []
            for j, state_j in enumerate(state): # Hidden State (j)
                key_j = self._keys[j]
                gate_j = self.get_gate(state_j, key_j, inputs)
                candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, b)

                # Equation 4: h_j <- h_j + g_j * h_j^~
                # Perform an update of the hidden state (memory).
                state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

                # # Forget previous memories by normalization.
                # Equation 5: h_j <- h_j / \norm{h_j}
                state_j_next = tf.nn.l2_normalize(state_j_next, -1) # TODO: Is epsilon necessary?


                # Forget previous memories by normalization.
                # state_j_next_norm = tf.norm(tensor=state_j_next,
                #                             ord='euclidean',
                #                             axis=-1,
                #                             keep_dims=True)
                # state_j_next_norm = tf.where(
                #     tf.greater(state_j_next_norm, 0.0),
                #     state_j_next_norm,
                #     tf.ones_like(state_j_next_norm))
                # state_j_next = state_j_next / state_j_next_norm


                next_states.append(state_j_next)
            state_next = tf.concat(next_states, 1)
        return state_next, state_next
Project: conv_seq2seq    Author: tobyyouup
def conv1d_weightnorm(inputs, layer_idx, out_dim, kernel_size, padding="SAME", dropout=1.0,  var_scope_name="conv_layer"):    # NOTE: the padding mode needs care here

  with tf.variable_scope("conv_layer_"+str(layer_idx)):
    in_dim = int(inputs.get_shape()[-1])
    V = tf.get_variable('V', shape=[kernel_size, in_dim, out_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(4.0*dropout/(kernel_size*in_dim))), trainable=True)
    V_norm = tf.norm(V.initialized_value(), axis=[0,1])  # V shape is M*N*k,  V_norm shape is k  
    g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm, trainable=True)
    b = tf.get_variable('b', shape=[out_dim], dtype=tf.float32, initializer=tf.zeros_initializer(), trainable=True)

    # use weight normalization (Salimans & Kingma, 2016)
    W = tf.reshape(g, [1,1,out_dim])*tf.nn.l2_normalize(V,[0,1])
    inputs = tf.nn.bias_add(tf.nn.conv1d(value=inputs, filters=W, stride=1, padding=padding), b)   
    return inputs
Project: source_separation_ml_jeju    Author: hjkwon0609
def add_loss_op(self, voice_spec, song_spec):
        if not EmbeddingConfig.use_vpnn:
            # concatenate all batches into one axis  [num_batches * time_frames, freq_bins]
            voice_spec = tf.reshape(voice_spec, [-1, EmbeddingConfig.num_freq_bins])
            song_spec = tf.reshape(song_spec, [-1, EmbeddingConfig.num_freq_bins])

        self.voice_spec = voice_spec  # for output
        self.song_spec = song_spec

        song_spec_mask = tf.cast(tf.abs(song_spec) > tf.abs(voice_spec), tf.float32)
        voice_spec_mask =  tf.ones(song_spec_mask.get_shape()) - song_spec_mask

        V = self.embedding
        Y = tf.transpose([song_spec_mask, voice_spec_mask], [1, 2, 0])  # [num_batch, num_freq_bins, 2]

        # A_pred = tf.matmul(V, tf.transpose(V, [0, 2, 1]))
        # A_target = tf.matmul(Y, tf.transpose(Y, [0, 2, 1]))
        error = tf.reduce_mean(tf.square(tf.matmul(V, tf.transpose(V, [0, 2, 1])) - tf.matmul(Y, tf.transpose(Y, [0, 2, 1]))))  # average error per TF bin

        # tf.summary.histogram('a_same cluster embedding distribution', A_pred * A_target)
        # tf.summary.histogram('a_different cluster embedding distribution', A_pred * (1 - A_target))

        # tf.summary.histogram('V', V)
        # tf.summary.histogram('V V^T', A_pred)

        l2_cost = tf.reduce_sum([tf.norm(v) for v in tf.trainable_variables() if len(v.get_shape().as_list()) == 2])

        self.loss = EmbeddingConfig.l2_lambda * l2_cost + error

        # tf.summary.scalar("avg_loss", self.loss)
        # tf.summary.scalar('regularizer cost', EmbeddingConfig.l2_lambda * l2_cost)
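The error term is the deep-clustering affinity objective (Hershey et al., 2016): it matches pairwise embedding affinities against the ideal binary-mask affinities, here averaged per entry rather than summed:

$$ L = \big\lVert V V^\top - Y Y^\top \big\rVert_F^2 $$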
Project: source_separation_ml_jeju    Author: hjkwon0609
def add_training_op(self):
        # learning_rate = tf.train.exponential_decay(EmbeddingConfig.lr, self.global_step, 50, 0.96)
        optimizer = tf.train.AdamOptimizer(learning_rate=EmbeddingConfig.lr, beta1=EmbeddingConfig.beta1, beta2=EmbeddingConfig.beta2)
        # optimizer = tf.train.MomentumOptimizer(learning_rate=EmbeddingConfig.lr, momentum=0.9)
        # optimizer = tf.train.RMSPropOptimizer(learning_rate=EmbeddingConfig.lr, epsilon=1e-6)
        grads = optimizer.compute_gradients(self.loss)
        grads = [(tf.clip_by_norm(grad, 100000), var) for grad, var in grads if grad is not None]
        # grads = [(grad + tf.random_normal(shape=grad.get_shape(), stddev=0.6), var) for grad, var in grads if grad is not None]
        # for grad, var in grads:
            # if grad is not None:
                # tf.summary.scalar('gradient_%s' % (var), tf.norm(grad))
                # tf.summary.histogram('gradient_%s' % (var), grad)
        self.optimizer = optimizer.apply_gradients(grads, global_step=self.global_step)
Project: source_separation_ml_jeju    Author: hjkwon0609
def add_loss_op(self, target):
        self.target = target  # for outputting later
        real_target = tf.abs(self.target)

        # mean = tf.concat([stats[0][0], stats[0][1]])
        # stdev = tf.concat([stats[1][0], stats[1][1]])

        # print(mean.get_shape())
        # print(stdev.get_shape())

        # real_target -= mean
        # real_target /= stdev

        delta = self.output - real_target 
        squared_error = tf.reduce_mean(tf.pow(delta, 2)) 

        l2_cost = tf.reduce_mean([tf.norm(v) for v in tf.trainable_variables() if len(v.get_shape().as_list()) == 3])

        self.loss = Config.l2_lambda * l2_cost + squared_error

        tf.summary.scalar("loss", self.loss)

        masked_loss = tf.abs(self.soft_masked_output) - real_target
        self.masked_loss = Config.l2_lambda * l2_cost + tf.reduce_mean(tf.pow(masked_loss, 2))
        tf.summary.scalar('masked_loss', self.masked_loss)
        tf.summary.scalar('regularization_cost', Config.l2_lambda * l2_cost)
Project: jupyter_tensorboard    Author: lspvic
def tf_logs(tmpdir_factory):

    import numpy as np
    import tensorflow as tf
    x = np.random.rand(5)
    y = 3 * x + 1 + 0.05 * np.random.rand(5)

    a = tf.Variable(0.1)
    b = tf.Variable(0.)
    err = a*x+b-y

    loss = tf.norm(err)
    tf.summary.scalar("loss", loss)
    tf.summary.scalar("a", a)
    tf.summary.scalar("b", b)
    merged = tf.summary.merge_all()

    optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    with tf.Session() as sess:
        log_dir = tmpdir_factory.mktemp("logs", numbered=False)
        log_dir = str(log_dir)

        train_write = tf.summary.FileWriter(log_dir, sess.graph)
        tf.global_variables_initializer().run()
        for i in range(1000):
            _, merged_ = sess.run([optimizer, merged])
            train_write.add_summary(merged_, i)

    return log_dir
Project: image-text-matching    Author: llltttppp
def build_input(self):
        # positive
        self.raw_sentence= tf.placeholder(tf.float32, shape=[self.batch_size,18000],name='raw_sentence')
        self.sentence_emb =self.raw_sentence/tf.norm(self.raw_sentence,axis=-1,keep_dims=True) #tf.nn.embedding_lookup(tf.get_variable('word_embedding',[4096,512]),self.raw_sentence)
        self.image_feat = tf.placeholder(tf.float32,shape=[self.batch_size,4096], name='image_features')  
        self.image_feat_norm = self.image_feat/tf.norm(self.image_feat,axis=-1,keep_dims=True)
        self.sen_feat_norm = self.sentence_emb/tf.norm(self.sentence_emb,axis=-1,keep_dims=True)
        self.im_similarity = tf.matmul(self.image_feat_norm,self.image_feat_norm,transpose_b=True)
        self.sen_similarity =tf.matmul(self.sen_feat_norm,self.sen_feat_norm,transpose_b=True)
Project: image-text-matching    Author: llltttppp
def build_input(self):
        # positive
        self.raw_sentence= tf.placeholder(tf.float32, shape=[self.batch_size,18000],name='raw_sentence')
        self.sentence_emb =self.raw_sentence/tf.norm(self.raw_sentence,axis=-1,keep_dims=True) #tf.nn.embedding_lookup(tf.get_variable('word_embedding',[4096,512]),self.raw_sentence)
        self.image_feat = tf.placeholder(tf.float32,shape=[self.batch_size,4096], name='image_features')  
        self.image_feat_norm = self.image_feat/tf.norm(self.image_feat,axis=-1,keep_dims=True)
        self.sen_feat_norm = self.sentence_emb/tf.norm(self.sentence_emb,axis=-1,keep_dims=True)
        self.im_similarity = tf.matmul(self.image_feat_norm,self.image_feat_norm,transpose_b=True)
        self.sen_similarity =tf.matmul(self.sen_feat_norm,self.sen_feat_norm,transpose_b=True)
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.nn.dropout(tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1'),keep_prob=self.keep_prob )# 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.nn.dropout(tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1'),keep_prob=self.keep_prob)
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def sentencenet(self, input_tensor, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)

            sentence_fc1 = tf.contrib.layers.fully_connected(input_tensor, 2048, weights_regularizer=wd, scope='s_fc1')
            #drop_fc1 = tf.nn.dropout(sentence_fc1, self.keep_prob, name='drop_fc1')
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                           reuse=reuse, decay=0.999, updates_collections=None, 
                                                           scope='s_fc2_bn')
            embed = sentence_fc2_bn/tf.norm(sentence_fc2_bn,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def build_input(self):
        # positive
        self.raw_sentence= tf.placeholder(tf.float32, shape=[self.batch_size,18000],name='raw_sentence')
        self.sentence_emb =tf.sign(self.raw_sentence)*tf.pow(tf.abs(self.raw_sentence),0.5)/tf.norm(self.raw_sentence,axis=1,keep_dims=True) #tf.nn.embedding_lookup(tf.get_variable('word_embedding',[4096,512]),self.raw_sentence)
        self.image_feat = tf.placeholder(tf.float32,shape=[self.batch_size,4096], name='image_features')
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.nn.dropout(tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1'),keep_prob=self.keep_prob) # 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.nn.dropout(tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1'),keep_prob=self.keep_prob)
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1') # 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1')
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.nn.dropout(tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1'),keep_prob=self.keep_prob )# 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.nn.dropout(tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1'),keep_prob=self.keep_prob)
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def build_input(self):
        # positive
        self.labels = tf.placeholder(tf.float32, shape=[None,self.num_class], name='concept_labels')
        self.raw_sentence= tf.placeholder(tf.float32, shape=[self.batch_size,9000],name='raw_sentence')
        self.sentence_emb =self.raw_sentence/tf.norm(self.raw_sentence,axis=-1,keep_dims=True) #tf.nn.embedding_lookup(tf.get_variable('word_embedding',[4096,512]),self.raw_sentence)
        self.image_feat = tf.placeholder(tf.float32,shape=[self.batch_size,4096], name='image_features')  
        self.image_feat_norm = self.image_feat/tf.norm(self.image_feat,axis=-1,keep_dims=True)
        self.sen_feat_norm = self.sentence_emb/tf.norm(self.sentence_emb,axis=-1,keep_dims=True)
        self.im_similarity = tf.matmul(self.image_feat_norm,self.image_feat_norm,transpose_b=True)
        self.sen_similarity =tf.matmul(self.sen_feat_norm,self.sen_feat_norm,transpose_b=True)
Project: image-text-matching    Author: llltttppp
def sentencenet(self, sentence_emb, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            sentence_fc1 =tf.nn.dropout(tf.contrib.layers.fully_connected(sentence_emb,2048, \
                                                            weights_regularizer=wd, scope='s_fc1'),keep_prob=self.keep_prob )# 20*10*256
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None,normalizer_fn=tf.contrib.layers.batch_norm,\
                                                             normalizer_params={'is_training':self.is_training,'updates_collections':None}, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2 = sentence_fc2/tf.norm(sentence_fc2,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = sentence_fc2
        return sentence_fc2
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1')
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def sentencenet(self, input_tensor, reuse=False):
        with tf.variable_scope('sentence_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)

            #lstm_embed = self.lstm(input_tensor, reuse=reuse)
            sentence_fc1 = tf.contrib.layers.fully_connected(input_tensor, 2048, weights_regularizer=wd, scope='s_fc1')
            #drop_fc1 = tf.nn.dropout(sentence_fc1, self.keep_prob, name='drop_fc1')
            sentence_fc2 = tf.contrib.layers.fully_connected(sentence_fc1, 512,activation_fn=None, weights_regularizer=wd, scope='s_fc2')
            sentence_fc2_bn = tf.contrib.layers.batch_norm(sentence_fc2, center=True, scale=True, is_training=self.is_training,
                                                           reuse=reuse, decay=0.999, updates_collections=None, 
                                                           scope='s_fc2_bn')
            embed = sentence_fc2_bn/tf.norm(sentence_fc2_bn,axis= -1,keep_dims=True)
        self.endpoint['sentence_fc1'] = sentence_fc1
        self.endpoint['sentence_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False, skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1')
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed
Project: image-text-matching    Author: llltttppp
def build_input(self):
        # positive
        self.raw_sentence= tf.placeholder(tf.float32, shape=[self.batch_size,1000],name='raw_sentence')
        self.sentence_emb =self.raw_sentence/(1e-12+tf.norm(self.raw_sentence,ord=2,axis=1,keep_dims=True)) #tf.nn.embedding_lookup(tf.get_variable('word_embedding',[4096,512]),self.raw_sentence)
        self.image_feat = tf.placeholder(tf.float32,shape=[self.batch_size,4096], name='image_features')
Project: image-text-matching    Author: llltttppp
def imagenet(self, image_feat, reuse=False,skip=False):
        if skip:
            return image_feat
        with tf.variable_scope('image_net', reuse=reuse) as scope:
            wd = tf.contrib.layers.l2_regularizer(self.weight_decay)
            image_fc1 = tf.nn.dropout(tf.contrib.layers.fully_connected(image_feat,2048, weights_regularizer=wd,scope='i_fc1'),keep_prob=self.keep_prob)
            #drop_fc1 = tf.nn.dropout(image_fc1, self.keep_prob, name='drop_fc1')
            image_fc2 = tf.contrib.layers.fully_connected(image_fc1, 512, activation_fn=None, weights_regularizer=wd, scope='i_fc2')
            image_fc2_bn = tf.contrib.layers.batch_norm(image_fc2, center=True, scale=True, is_training=self.is_training, 
                                                        reuse=reuse, decay=0.999, updates_collections=None, 
                                                        scope='i_fc2_bn')
            embed = image_fc2_bn / tf.norm(image_fc2_bn,axis=-1,keep_dims=True)
        self.endpoint['image_fc1'] = image_fc1
        self.endpoint['image_fc2'] = embed
        return embed