Python tensorflow module, placeholder_with_default() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.placeholder_with_default().

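Before diving into the project snippets, here is a minimal, self-contained sketch of the basic pattern most of the examples below rely on (TF 1.x graph API; the keep_prob and x names are illustrative, not taken from any project): the tensor evaluates to its default value unless a feed_dict entry overrides it.

import tensorflow as tf

# A scalar hyperparameter with a default: no feed_dict entry is required at run time.
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
x = tf.placeholder(tf.float32, shape=[None, 4], name="x")
dropped = tf.nn.dropout(x, keep_prob)

with tf.Session() as sess:
    data = [[1.0, 2.0, 3.0, 4.0]]
    # Inference: the default (1.0) is used, so dropout is a no-op.
    print(sess.run(dropped, feed_dict={x: data}))
    # Training: override the default through feed_dict, just like a normal placeholder.
    print(sess.run(dropped, feed_dict={x: data, keep_prob: 0.5}))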
Project: dynamic-training-bench    Author: galeone
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model for training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
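The is_training_ tensor above defaults to False, so an exported graph runs in inference mode unless the caller feeds True. A hedged sketch of how such a switch is typically consumed (toy_inference, the layer sizes and the dropout rate below are illustrative assumptions, not code from dynamic-training-bench):

import tensorflow as tf

def toy_inference(images, is_training_):
    # Dense layer followed by dropout that is only active while training.
    hidden = tf.layers.dense(images, 64, activation=tf.nn.relu)
    hidden = tf.cond(is_training_,
                     lambda: tf.nn.dropout(hidden, keep_prob=0.5),
                     lambda: hidden)
    return tf.layers.dense(hidden, 10)  # logits

is_training_ = tf.placeholder_with_default(False, shape=(), name="is_training_")
images = tf.placeholder(tf.float32, [None, 784])
logits = toy_inference(images, is_training_)
# sess.run(logits, {images: batch})                        -> inference (default False)
# sess.run(logits, {images: batch, is_training_: True})    -> training behaviour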
Project: tf.rasterizer    Author: vahidk
def __init__(self):
        self.vertices = tf.placeholder(tf.float32, [None, 3])
        self.normals = tf.placeholder(tf.float32, [None, 3])
        self.uvs = tf.placeholder(tf.float32, [None, 2])
        self.texture = tf.placeholder(tf.float32, [None, None, 3])

        default_light_dir = np.array([-1, -1, -1], dtype=np.float32)
        default_ambient = np.array([0.5, 0.5, 0.5], dtype=np.float32)
        default_diffuse = np.array([1, 1, 1], dtype=np.float32)
        default_wvp = np.eye(4, dtype=np.float32)

        self.light_dir = tf.placeholder_with_default(default_light_dir, [3])
        self.ambient = tf.placeholder_with_default(default_ambient, [3])
        self.diffuse = tf.placeholder_with_default(default_diffuse, [3])
        self.wvp = tf.placeholder_with_default(default_wvp, [4, 4])

        self.packed_texture = utils.pack_colors(self.texture, 2, False)
        self.iwvp = tf.matrix_inverse(self.wvp)

        self.varying_uv = [None, None, None]
        self.varying_norm = [None, None, None]
Project: lda2vec-tf    Author: meereeum
def __init__(self, n_documents, n_topics, n_dim, temperature=1.0,
                 W_in=None, factors_in=None):
        self.n_documents = n_documents
        # self.n_topics = n_topics
        # self.n_dim = n_dim
        self.temperature = temperature
        self.dropout = tf.placeholder_with_default(1., shape=[], name="dropout")

        scalar = 1 / np.sqrt(n_documents + n_topics)

        self.W = (tf.Variable( # unnormalized embedding weights
            tf.random_normal([n_documents, n_topics], mean=0, stddev=50*scalar),
                name="doc_embeddings") if W_in is None else W_in)

        # factors = (tf.Variable( # topic vectors
        #       _orthogonal_matrix((n_topics, n_dim)).astype("float32") * scalar,
        #       name="topics") if factors_in is None else factors_in)

        # tf 0.12.0 only
        factors = (tf.get_variable("topics", shape=(n_topics, n_dim),
                                   dtype=tf.float32, initializer=
                                   tf.orthogonal_initializer(gain=scalar))
                   if factors_in is None else factors_in)
        self.factors = tf.nn.dropout(factors, self.dropout)
Project: WaveNet-Enhancement    Author: auspicious3000
def data_initializer_prior(data_segments, data_labels):
    # Input data
    segments_initializer = tf.placeholder_with_default(
        tf.zeros(data_segments.shape, tf.int32),
        shape=data_segments.shape,
        name='segments_initializer')
    labels_initializer = tf.placeholder_with_default(
        tf.zeros(data_labels.shape, tf.int32),
        shape=data_labels.shape,
        name='labels_initializer')
    input_segments = tf.Variable(
        segments_initializer, trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_segments')
    input_labels = tf.Variable(
        labels_initializer, trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_labels')

    return (segments_initializer, labels_initializer, input_segments, input_labels)
Project: WaveNet-Enhancement    Author: auspicious3000
def data_initializer_simple(data_segments, data_labels):
    # Input data
    segments_initializer = tf.placeholder_with_default(
        tf.zeros(data_segments.shape, tf.float32),
        shape=data_segments.shape,
        name='segments_initializer')
    labels_initializer = tf.placeholder_with_default(
        tf.zeros(data_labels.shape, tf.int32),
        shape=data_labels.shape,
        name='labels_initializer')
    input_segments = tf.Variable(
        segments_initializer, trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_segments')
    input_labels = tf.Variable(
        labels_initializer, trainable=False,
        collections=[tf.GraphKeys.LOCAL_VARIABLES], name='input_labels')

    return (segments_initializer, labels_initializer, input_segments, input_labels)
Project: lung-cancer-detector    Author: YichenGong
def create_inputs(self, image_size):
        print("Creating input Placeholders...")
        #input image
        self.X = tf.placeholder(dtype=tf.float32, 
            shape=[None, image_size[0], image_size[1], 1],
            name="in")

        #Outputs of the different heads

        #Nodule head
        self.Y_nodule = tf.placeholder(dtype=tf.float32,
            shape=[None, image_size[0], image_size[1], 1],
            name="out_nodule")
        self.Y_nodule_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="nodule_weight")

        #Cancer head
        self.Y_cancer = tf.placeholder(dtype=tf.float32,
            shape=[None, 1],
            name="out_cancer")
        self.Y_cancer_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="cancer_weight")

        #Boolean variables to check head and mode
        self.is_training = tf.placeholder(dtype=tf.bool,
            name="is_training")
        self.is_nodule = tf.placeholder(dtype=tf.bool,
            name="is_nodule")
        self.is_cancer = tf.placeholder(dtype=tf.bool,
            name="is_cancer")

        #Probability for dropout
        self.drop_prob = tf.placeholder_with_default(input=0.0,
            shape=None,
            name="dropout_probability")

        print("Created input placeholders!")
Project: CausalGAN    Author: mkocaoglu
def interpret_dict( a_dict, model,n_times=1, on_logits=True):
    '''
    pass either a do_dict or a cond_dict.
    The rules for converting arguments to numpy arrays to pass
    to tensorflow are identical
    '''
    if a_dict is None:
        return {}
    elif len(a_dict)==0:
        return {}

    if n_times>1:
        token = tf.placeholder_with_default(2.22, shape=())  # shape is a required argument
        a_dict[token]=-2.22

    p_a_dict=take_product(a_dict)

    ##Need divisible batch_size for most models
    if len(p_a_dict)>0:
        L=len(p_a_dict.values()[0])
    else:
        L=0
    print("L is " + str(L))
    print(p_a_dict)

    ##Check compatibility of batch_size and L
    if L>=model.batch_size:
        if not L % model.batch_size == 0:
            raise ValueError('a_dict must be divisible by batch_size\
                             but instead product of inputs was of length',L)
    elif model.batch_size % L == 0:
        p_a_dict = {key:np.repeat(value,model.batch_size/L,axis=0) for key,value in p_a_dict.items()}
    else:
        raise ValueError('No. of intervened values must divide batch_size.')
    return p_a_dict
Project: CausalGAN    Author: mkocaoglu
def __init__(self,N):
        with tf.variable_scope('Arrow') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            #self.N=tf.constant(N) #how many to sample at a time
            self.e1=tf.random_uniform([self.N,1],0,1)
            self.e2=tf.random_uniform([self.N,1],0,1)
            self.e3=tf.random_uniform([self.N,1],0,1)
            self.build()
            #WARN. some of these are not trainable: i.e. poly
            self.var = tf.contrib.framework.get_variables(scope)
Project: CausalGAN    Author: mkocaoglu
def __init__(self, N, hidden_size=10,z_dim=10):
        with tf.variable_scope('Gen') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            self.hidden_size=hidden_size
            self.z_dim=z_dim
            self.build()
            self.tr_var = tf.contrib.framework.get_variables(scope)
            self.step=tf.Variable(0,name='step',trainable=False)
            self.var = tf.contrib.framework.get_variables(scope)
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko
def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup()
Project: jack    Author: uclmr
def create_placeholder(self):
        """Creates a TF placeholder_with_default.

        Convenience method that produces a constant of the type, value and shape defined by the port.
        Returns: a constant tensor of same type, shape and name. It can nevertheless be fed with external values
        as if it was a placeholder.
        """
        ph = tf.placeholder_with_default(self.default_value, self.shape, self.name)
        if ph.dtype != self.dtype:
            logger.warning(
                "Placeholder {} with default of type {} created for TensorPort with type {}!".format(self.name,
                                                                                                     ph.dtype,
                                                                                                     self.dtype))
        return ph
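A small sketch of the dtype pitfall this warning guards against: placeholder_with_default infers its dtype from the default value, so an integer default silently yields an int32 tensor even when the port was declared as float32 (expected_dtype and the "length" name are illustrative, not jack's API):

import tensorflow as tf

expected_dtype = tf.float32
ph = tf.placeholder_with_default(0, shape=(), name="length")  # integer default -> int32 tensor
if ph.dtype != expected_dtype:
    print("warning: got {}, expected {}".format(ph.dtype, expected_dtype))
# Passing 0.0 (a float default) instead would produce the intended float32 tensor.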
Project: DeepVideo    Author: AniketBajpai
def build_model(self, is_train=True):
        ''' Build model '''

        # Placeholders for data
        self.current_frames = tf.placeholder(
            name='current_frames', dtype=tf.float32,
            shape=[self.batch_size, self.num_frames, self.image_height, self.image_width, self.num_channels]
        )
        self.future_frames = tf.placeholder(
            name='future_frames', dtype=tf.float32,
            shape=[self.batch_size, self.num_frames, self.image_height, self.image_width, self.num_channels]
        )
        # self.label = tf.placeholder(
        #     name='label', dtype=tf.float32, shape=[self.batch_size, self.num_classes]
        # )

        self.is_train = tf.placeholder_with_default(bool(is_train), [], name='is_train')

        # Encoder
        self.E = Encoder('Encoder', self.configs_encoder)
        self.z = self.E(self.current_frames, is_debug=self.is_debug)

        # Generators
        self.Gr = Generator('Generator_R', self.configs_generator)
        self.Gf = Generator('Generator_F', self.configs_generator)

        self.generated_current_frames = self.Gr(self.z, is_debug=self.is_debug)
        self.generated_future_frames = self.Gf(self.z, is_debug=self.is_debug)

        # Discriminators
        self.D = Discriminator('Discriminator', self.configs_discriminator)

        self.D_real_current, self.D_real_current_logits = self.D(self.current_frames, is_debug=self.is_debug)
        self.D_fake_current, self.D_fake_current_logits = self.D(self.generated_current_frames, is_debug=self.is_debug)
        self.D_real_future, self.D_real_future_logits = self.D(self.future_frames, is_debug=self.is_debug)
        self.D_fake_future, self.D_fake_future_logits = self.D(self.generated_future_frames, is_debug=self.is_debug)

        print_message('Successfully loaded the model')
Project: tensorflow-yolo    Author: hjimce
def wrap_pholder(self, ph, feed):
        """wrap layer.h into placeholders"""
        phtype = type(self.lay.h[ph])
        if phtype is not dict: return

        sig = '{}/{}'.format(self.scope, ph)
        val = self.lay.h[ph]

        self.lay.h[ph] = tf.placeholder_with_default(
            val['dfault'], val['shape'], name = sig)
        feed[self.lay.h[ph]] = val['feed']
Project: tensorflow_homographynet    Author: linjian93
def __init__(self, inputs, trainable=True, is_training=False):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training)
Project: aboleth    Author: data61
def test_input_sample(make_data):
    """Test the input and tiling layer."""
    x, _, X = make_data
    n_samples = tf.placeholder_with_default(3, [])
    s = ab.InputLayer(name='myname', n_samples=n_samples)

    F, KL = s(myname=x)
    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        X_array = X.eval()
        assert KL == 0.0
        assert np.array_equal(f, X_array)
        for i in range(3):
            assert np.array_equal(f[i], x)
Project: tensorflow-char-rnn    Author: crazydonkey200
def create_tuple_placeholders_with_default(inputs, extra_dims, shape):
  if isinstance(shape, int):
    result = tf.placeholder_with_default(
      inputs, list(extra_dims) + [shape])
  else:
    subplaceholders = [create_tuple_placeholders_with_default(
      subinputs, extra_dims, subshape)
                       for subinputs, subshape in zip(inputs, shape)]
    t = type(shape)
    if t == tuple:
      result = t(subplaceholders)
    else:
      result = t(*subplaceholders)    
  return result
Project: char-rnn-text-generation    Author: yxtay
def build_train_graph(loss, learning_rate=0.001, clip_norm=5.0):
    """
    builds training graph
    """
    train_args = {"learning_rate": learning_rate, "clip_norm": clip_norm}
    logger.debug("building training graph: %s.", train_args)

    learning_rate = tf.placeholder_with_default(learning_rate, [], "learning_rate")
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = layers.optimize_loss(loss, global_step, learning_rate, "Adam",
                                    clip_gradients=clip_norm)

    model = {"global_step": global_step, "train_op": train_op,
             "learning_rate": learning_rate, "train_args": train_args}
    return model
Project: language-translation-english-to-french    Author: Satyaki0924
def main(self):
        train_graph = tf.Graph()
        save_path = self.path + '/checkpoints/dev'
        source_path = self.path + '/data/small_vocab_en'
        target_path = self.path + '/data/small_vocab_fr'
        PreProcess(source_path, target_path).process_and_save_data()
        _, batch_size, rnn_size, num_layers, encoding_embedding_size, decoding_embedding_size, _, _ = \
            Params().get()
        (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = \
            self.load_process()
        max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
        with train_graph.as_default():
            input_data, targets, lr, keep_prob = Inputs().get()
            sequence_length = tf.placeholder_with_default(
                max_source_sentence_length, None, name='sequence_length')
            input_shape = tf.shape(input_data)
            train_logits, inference_logits = Seq2seq().seq2seq_model(
                tf.reverse(input_data, [-1]), targets, keep_prob, batch_size,
                sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
                encoding_embedding_size, decoding_embedding_size,
                rnn_size, num_layers, target_vocab_to_int)
            tf.identity(inference_logits, 'logits')
            with tf.name_scope("optimization"):
                cost = tf.contrib.seq2seq.sequence_loss(train_logits, targets,
                                                        tf.ones([input_shape[0], sequence_length]))
                optimizer = tf.train.AdamOptimizer(lr)
                gradients = optimizer.compute_gradients(cost)
                capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                    for grad, var in gradients if grad is not None]
                train_op = optimizer.apply_gradients(capped_gradients)
        Train(source_int_text, target_int_text, train_graph, train_op, cost,
              input_data, targets, lr, sequence_length, keep_prob, inference_logits, save_path).train()
Project: text-gan-tensorflow    Author: tokestermw
def prepare_data(path, word2idx, num_threads=8, **opts):
    with tf.device("/cpu:0"):
        enqueue_data, dequeue_batch = get_input_queues(
            path, word2idx, batch_size=opts["batch_size"], num_threads=num_threads)
        # TODO: put this logic somewhere else
        input_ph = tf.placeholder_with_default(dequeue_batch, (None, None))
        source, target, sequence_length = preprocess(input_ph)
    return enqueue_data, input_ph, source, target, sequence_length
Project: maml    Author: cbfinn
def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):
        """ must call construct_model() after initializing MAML! """
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.update_lr = FLAGS.update_lr
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        self.classification = False
        self.test_num_updates = test_num_updates
        if FLAGS.datasource == 'sinusoid':
            self.dim_hidden = [40, 40]
            self.loss_func = mse
            self.forward = self.forward_fc
            self.construct_weights = self.construct_fc_weights
        elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':
            self.loss_func = xent
            self.classification = True
            if FLAGS.conv:
                self.dim_hidden = FLAGS.num_filters
                self.forward = self.forward_conv
                self.construct_weights = self.construct_conv_weights
            else:
                self.dim_hidden = [256, 128, 64, 64]
                self.forward=self.forward_fc
                self.construct_weights = self.construct_fc_weights
            if FLAGS.datasource == 'miniimagenet':
                self.channels = 3
            else:
                self.channels = 1
            self.img_size = int(np.sqrt(self.dim_input/self.channels))
        else:
            raise ValueError('Unrecognized data source.')
Project: tf_serving_example    Author: Vetal1977
def __init__(self, input_real, z_size, learning_rate, num_classes=10,
                 alpha=0.2, beta1=0.5, drop_rate=.5):
        """
        Initializes the GAN model.

        :param input_real: Real data for the discriminator
        :param z_size: The number of entries in the noise vector.
        :param learning_rate: The learning rate to use for Adam optimizer.
        :param num_classes: The number of classes to recognize.
        :param alpha: The slope of the left half of the leaky ReLU activation
        :param beta1: The beta1 parameter for Adam.
        :param drop_rate: The probability of dropping a hidden unit (used in discriminator)
        """

        self.learning_rate = tf.Variable(learning_rate, trainable=False)
        self.input_real = input_real
        self.input_z = tf.placeholder(tf.float32, (None, z_size), name='input_z')
        self.y = tf.placeholder(tf.int32, (None), name='y')
        self.label_mask = tf.placeholder(tf.int32, (None), name='label_mask')
        self.drop_rate = tf.placeholder_with_default(drop_rate, (), "drop_rate")

        loss_results = self.model_loss(self.input_real, self.input_z,
                                       self.input_real.shape[3], self.y, num_classes,
                                       label_mask=self.label_mask,
                                       drop_rate=self.drop_rate,
                                       alpha=alpha)

        self.d_loss, self.g_loss, self.correct, \
            self.masked_correct, self.samples, self.pred_class, \
                self.discriminator_class_logits, self.discriminator_out = \
                    loss_results

        self.d_opt, self.g_opt, self.shrink_lr = self.model_opt(self.d_loss,
                                                                self.g_loss,
                                                                self.learning_rate, beta1)
Project: tensorflow-deeplab-resnet    Author: DrSleep
def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training, num_classes)
Project: Activation-Visualization-Histogram    Author: shaohua0116
def __init__(self, config,
                 debug_information=False,
                 is_train=True):
        self.debug = debug_information

        self.config = config
        self.batch_size = self.config.batch_size
        self.input_height = self.config.data_info[0]
        self.input_width = self.config.data_info[1]
        self.num_class = self.config.data_info[2]
        self.c_dim = self.config.data_info[3]
        self.visualize_shape = self.config.visualize_shape
        self.conv_info = self.config.conv_info
        self.activation_fn = {
            'selu': selu,
            'relu': tf.nn.relu,
            'lrelu': lrelu,
        }[self.config.activation]

        # create placeholders for the input
        self.image = tf.placeholder(
            name='image', dtype=tf.float32,
            shape=[self.batch_size, self.input_height, self.input_width, self.c_dim],
        )
        self.label = tf.placeholder(
            name='label', dtype=tf.float32, shape=[self.batch_size, self.num_class],
        )

        self.is_training = tf.placeholder_with_default(bool(is_train), [], name='is_training')

        self.build(is_train=is_train)
Project: tf-openpose    Author: ildoonet
def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup()
Project: deep_learning    Author: wecliqued
def _create_cell(self, seq, no_stacked_cells):
        """
        Creates GRU cell
        :param seq: placeholder of the input batch
        :return: cell and placeholder for its internal state
        """
        batch_size = tf.shape(seq)[0]
        # Since around May 2017, there is new way of constructing MultiRNNCell
        cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(self.hidden_size) for _ in range(no_stacked_cells)])
        multi_cell_zero_state = cell.zero_state(batch_size, tf.float32)
        in_state_shape = tuple([None, self.hidden_size] for _ in range(no_stacked_cells))
        in_state = tuple(tf.placeholder_with_default(cell_zero_state, [None, self.hidden_size], name='in_state') for cell_zero_state in multi_cell_zero_state)
        return cell, in_state
Project: deep_learning    Author: wecliqued
def _create_cell(self, seq, no_stacked_cells):
        """
        Creates GRU cell
        :param seq: placeholder of the input batch
        :return: cell and placeholder for its internal state
        """
        batch_size = tf.shape(seq)[0]

        ##########################################################################################################
        #
        # TODO: Create a stacked MultiRNNCell from GRU cells
        #       First, you have to use tf.contrib.rnn.GRUCell() to construct cells
        #       Since around May 2017, there is new way of constructing MultiRNNCell and you need to create
        #       one cell for each layer. Old code snippets that used [cell * no_stacked_cells] that you can
        #       find online might not work with the latest Tensorflow
        #
        #       After construction GRUCell objects, use it to construct tf.contrib.rnn.MultiRNNCell().
        #
        # YOUR CODE BEGIN
        #
        ##########################################################################################################

        cell = None  # your code here

        ##########################################################################################################
        #
        # YOUR CODE END
        #
        ##########################################################################################################

        multi_cell_zero_state = cell.zero_state(batch_size, tf.float32)
        in_state_shape = tuple([None, self.hidden_size] for _ in range(no_stacked_cells))
        in_state = tuple(tf.placeholder_with_default(cell_zero_state, [None, self.hidden_size], name='in_state') for cell_zero_state in multi_cell_zero_state)

        return cell, in_state
Project: GraphSAGE    Author: williamleif
def construct_placeholders(num_classes):
    # Define placeholders
    placeholders = {
        'labels' : tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
        'batch' : tf.placeholder(tf.int32, shape=(None), name='batch1'),
        'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
        'batch_size' : tf.placeholder(tf.int32, name='batch_size'),
    }
    return placeholders
Project: GraphSAGE    Author: williamleif
def construct_placeholders():
    # Define placeholders
    placeholders = {
        'batch1' : tf.placeholder(tf.int32, shape=(None), name='batch1'),
        'batch2' : tf.placeholder(tf.int32, shape=(None), name='batch2'),
        # negative samples for all nodes in the batch
        'neg_samples': tf.placeholder(tf.int32, shape=(None,),
            name='neg_sample_size'),
        'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
        'batch_size' : tf.placeholder(tf.int32, name='batch_size'),
    }
    return placeholders
Project: tefla    Author: openAGI
def _process_towers_grads(self, dataset, opt, model, is_training=True, reuse=None, loss_type='cross_entropy', is_classification=True):
        tower_grads = []
        tower_loss = []
        self.target_probs = tf.placeholder_with_default(tf.convert_to_tensor([1 / float(self.num_classes) for _ in range(0, self.num_classes)]),
                                                        shape=[self.num_classes, ], name="target_probs")
        with tf.variable_scope(tf.get_variable_scope()):
            for i in xrange(self.cnf.get('num_gpus', 1)):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (self.cnf.get('TOWER_NAME', 'tower'), i)) as scope:
                        images, labels = distorted_inputs(
                            dataset, self.cnf['tfrecords_im_size'], self.cnf.get('crop_size'),
                            batch_size=self.cnf['batch_size_train'],
                            num_preprocess_threads=32, num_readers=8,
                            target_probs=self.target_probs,
                            init_probs=tf.convert_to_tensor(self.cnf['init_probs']),
                            image_preprocessing=self.preprocessor.preprocess_image,
                            data_balancing=self.data_balancing)
                        labels = self._adjust_ground_truth(labels)
                        loss = self._tower_loss(scope, model, images, labels, is_training=is_training,
                                                reuse=i > 0, is_classification=is_classification, gpu_id=i, loss_type=loss_type)

                        tf.get_variable_scope().reuse_variables()
                        if self.clip_by_global_norm:
                            grads_and_vars = self._clip_grad_global_norms(tf.trainable_variables(
                            ), loss, opt, global_norm=self.norm_threshold, gradient_noise_scale=0.0)
                        else:
                            grads_and_vars = opt.compute_gradients(loss)
                        tower_grads.append(grads_and_vars)
                        tower_loss.append(loss)

        grads_and_vars = self._average_gradients(tower_grads)

        return grads_and_vars, sum(tower_loss)
Project: PlantImageRecognition    Author: HeavenMin
def finalTrainingLayer(classCount, finalTensorName, bottleneckTensor):
    with tf.name_scope('input'):
        bottleneckInput = tf.placeholder_with_default(
            bottleneckTensor, shape = [None, BOTTLENECK_TENSOR_SIZE],
            name = 'BottleneckInputPlaceholder')

    groundTruthInput = tf.placeholder(tf.float32,
                                      [None, classCount],
                                      name = 'GroundTruthInput')
    layerName = 'finalLayer'
    with tf.name_scope(layerName):
        with tf.name_scope('weights'):
            initialValue = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, classCount],
                                               stddev=0.001)
            layerWeights = tf.Variable(initialValue, name = 'finalWeights')
            tensorBoardUsage(layerWeights)
        with tf.name_scope('biases'):
            layerBiases = tf.Variable(tf.zeros([classCount]), name='finalBiases')
            tensorBoardUsage(layerBiases)
        with tf.name_scope('WxPlusB'):
            logits = tf.matmul(bottleneckInput, layerWeights) + layerBiases
            tf.summary.histogram('pre_activations', logits)

    finalTensor = tf.nn.softmax(logits, name=finalTensorName)
    tf.summary.histogram('activations', finalTensor)

    with tf.name_scope('crossEntropy'):
        crossEntropy = tf.nn.softmax_cross_entropy_with_logits(
                       labels=groundTruthInput, logits=logits)
        with tf.name_scope('total'):
            crossEntropyMean = tf.reduce_mean(crossEntropy)
    tf.summary.scalar('cross_entropy', crossEntropyMean)

    with tf.name_scope('train'):
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        trainStep = optimizer.minimize(crossEntropyMean)

    return (trainStep, crossEntropyMean, bottleneckInput, groundTruthInput,
            finalTensor)
Project: lsdc    Author: febert
def __init__(self, conf, images=None, scores=None, goal_pos=None, desig_pos=None):
        batchsize = int(conf['batch_size'])
        if goal_pos is None:
            self.goal_pos = goal_pos= tf.placeholder(tf.float32, name='goalpos', shape=(batchsize, 2))
        if desig_pos is None:
            self.desig_pos = desig_pos =  tf.placeholder(tf.float32, name='desig_pos_pl', shape=(batchsize, 2))
        if scores is None:
            self.scores = scores = tf.placeholder(tf.float32, name='score_pl', shape=(batchsize, 1))
        if images is None:
            self.images = images = tf.placeholder(tf.float32, name='images_pl', shape=(batchsize, 1, 64,64,3))

        self.prefix = prefix = tf.placeholder(tf.string, [])

        from value_model import construct_model

        summaries = []
        inf_scores = construct_model(conf, images, goal_pos, desig_pos)
        self.inf_scores = inf_scores
        self.loss = loss = mean_squared_error(inf_scores, scores)

        summaries.append(tf.scalar_summary(prefix + '_loss', loss))

        self.lr = tf.placeholder_with_default(conf['learning_rate'], ())

        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        self.summ_op = tf.merge_summary(summaries)
Project: atari-rl    Author: brendanator
def auto_placeholder(dtype, shape, name, feed_data, preprocess_offset=None):
  placeholder_shape = [None, None] + list(shape)[1:] if shape else shape
  placeholder = tf.placeholder(dtype, placeholder_shape, name)
  placeholder.required_feeds = RequiredFeeds(placeholder)
  placeholder.feed_data = feed_data

  tensor = preprocess_offset(placeholder) if preprocess_offset else placeholder

  def offset_data(t, name):
    input_len = shape[0]
    if not hasattr(placeholder, 'zero_offset'):
      placeholder.zero_offset = tf.placeholder_with_default(
          input_len - 1,  # If no zero_offset is given assume that t = 0
          (),
          name + '/zero_offset')

    end = t + 1
    start = end - input_len
    zero_offset = placeholder.zero_offset
    offset_tensor = tensor[:, start + zero_offset:end + zero_offset]

    input_range = np.arange(start, end)
    offset_tensor.required_feeds = RequiredFeeds(placeholder, input_range)

    return tf.reshape(offset_tensor, [-1] + shape, name)

  placeholder.offset_data = offset_data
  return placeholder
Project: tensorsandbox    Author: kaizouman
def __init__(self, wd=WEIGHT_DECAY, dropout=0.0):

        self.wd = wd
        self.dropout = dropout
        self.sizes = []
        self.flops = []
        self.training = tf.placeholder_with_default(False, shape=[], name="training")
Project: tensorsandbox    Author: kaizouman
def train_inputs(data_dir):
    """Construct input for CIFAR training.

    Note that batch_size is a placeholder whose default value is the one
    specified during training. It can however be specified differently at
    inference time by passing it explicitly in the feed dict when sess.run is
    called.

    Args:
        data_dir: Path to the CIFAR-10 data directory.

    Returns:
        images: Images. 4D tensor [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3].
        labels: Labels. 1D tensor [batch_size].
    """

    # Transpose dimensions
    raw_image, label = get_raw_input_data(False, data_dir)

    # If needed, perform data augmentation
    if tf.app.flags.FLAGS.data_aug:
        image = distort_image(raw_image)
    else:
        image = raw_image

    # Normalize image (substract mean and divide by variance)
    float_image = tf.image.per_image_standardization(image)

    # Create a queue to extract batch of samples
    batch_size_tensor = tf.placeholder_with_default(FLAGS.batch_size, shape=[])
    images, labels = tf.train.shuffle_batch([float_image,label],
                                     batch_size = batch_size_tensor,
                                     num_threads = NUM_THREADS,
                                     capacity = 20000 + 3 * FLAGS.batch_size,
                                     min_after_dequeue = 20000)

    # Display the training images in the visualizer
    tf.summary.image('images', images)

    return images, tf.reshape(labels, [-1])
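As the docstring notes, the batch size above is itself a placeholder_with_default, so a different value can be fed at inference time. A stand-alone sketch of that pattern with the queue/shuffle_batch machinery stripped away (the 1024x3 random tensor is just a stand-in for the input pipeline):

import tensorflow as tf

DEFAULT_BATCH = 128
batch_size = tf.placeholder_with_default(DEFAULT_BATCH, shape=[])
examples = tf.random_uniform([1024, 3])   # stand-in for the decoded input pipeline
batch = examples[:batch_size]             # stand-in for tf.train.shuffle_batch

with tf.Session() as sess:
    print(sess.run(batch).shape)                              # (128, 3): the training default
    print(sess.run(batch, feed_dict={batch_size: 1}).shape)   # (1, 3): overridden at inference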
Project: neuralmonkey    Author: ufal
def get_initial_loop_state(self) -> BeamSearchLoopState:
        # TODO make these feedable
        output_ta = SearchStepOutputTA(
            scores=tf.TensorArray(dtype=tf.float32, dynamic_size=True,
                                  size=0, name="beam_scores"),
            parent_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                      size=0, name="beam_parents"),
            token_ids=tf.TensorArray(dtype=tf.int32, dynamic_size=True,
                                     size=0, name="beam_tokens"))

        # We run the decoder once to get logits for ensembling
        dec_ls = self.parent_decoder.get_initial_loop_state()
        decoder_body = self.parent_decoder.get_body(False)
        dec_ls = decoder_body(*dec_ls)

        # We want to feed these values in ensembles
        self._search_state = SearchState(
            logprob_sum=tf.placeholder_with_default([0.0], [None]),
            prev_logprobs=tf.nn.log_softmax(dec_ls.feedables.prev_logits),
            lengths=tf.placeholder_with_default([1], [None]),
            finished=tf.placeholder_with_default([False], [None]))

        self._decoder_state = dec_ls.feedables

        # TODO make TensorArrays also feedable
        return BeamSearchLoopState(
            bs_state=self._search_state,
            bs_output=output_ta,
            decoder_loop_state=dec_ls)
Project: supervised-embedding-model    Author: sld
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        )
Project: examples    Author: guildai
def init_model():
    global x, y

    # Input layer
    x = tf.placeholder(tf.float32, [None, 784])

    # First convolutional layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # First fully connected layer
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout
    keep_prob = tf.placeholder_with_default(1.0, [])
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Output layer
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
Project: TensorArtist    Author: vacancy
def setup(self, graph):
        self._placeholders = graph.get_collection(TArtGraphKeys.PLACEHOLDERS)
        placeholders_dtypes = [x.dtype for x in self._placeholders]
        self._input_queue = tf.FIFOQueue(self._env.flags.input_queue_size, placeholders_dtypes, name=self._name)
        self._input_queue_cond = tf.placeholder_with_default(True, shape=[], name=self._name + '_cond')

        self.enqueue_op = self._input_queue.enqueue(self._placeholders)
        self.dequeue_op = self._input_queue.dequeue()
        self.close_op = self._input_queue.close(cancel_pending_enqueues=True)
        self.qsize_op = self._input_queue.size()

        for a, b in zip(self._placeholders, self.dequeue_op):
            as_tftensor(b).set_shape(as_tftensor(a).get_shape())

        self.edit_graph(graph)