Python tensorflow module, pack() code examples

We have extracted the following 50 code examples from open-source Python projects to demonstrate how to use tensorflow.pack().
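A quick orientation before the project examples: tf.pack stacks a list of rank-R tensors into a single rank-(R+1) tensor along a new axis. It was renamed tf.stack in TensorFlow 1.0, so the snippets below run unchanged only on pre-1.0 releases. A minimal sketch, with values chosen here purely for illustration:

import tensorflow as tf

x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.constant([5, 6])

# Stacks three shape-[2] tensors into a shape-[3, 2] tensor; axis=1 would give [2, 3].
packed = tf.pack([x, y, z])      # TensorFlow < 1.0
# packed = tf.stack([x, y, z])   # equivalent call on TensorFlow >= 1.0

with tf.Session() as sess:
    print(sess.run(packed))      # [[1 2] [3 4] [5 6]]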

Project: Tensormodels    Author: asheshjain399    | Project Source | File Source
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
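The sparse_to_dense construction above predates tf.one_hot; on TensorFlow releases that provide tf.one_hot, the same encoding is a single call. A minimal sketch under that assumption, with labels chosen here for illustration:

import tensorflow as tf

labels = tf.constant([0, 2, 1])  # [batch_size] integer class labels
# Each label becomes a row of length num_classes with a single 1.0 entry.
onehot_labels = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0)

with tf.Session() as sess:
    print(sess.run(onehot_labels))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]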
Project: tfplus    Author: renmengye    | Project Source | File Source
def init_var(self):
        self.rand_h = tf.random_uniform([1], 1.0 - float(self.rnd_hflip), 1.0)
        self.rand_v = tf.random_uniform([1], 1.0 - float(self.rnd_vflip), 1.0)
        self.rand_t = tf.random_uniform(
            [1], 1.0 - float(self.rnd_transpose), 1.0)
        self.offset = tf.random_uniform(
            [2], dtype='int32', maxval=self.padding * 2 + self.shrink)
        if self._debug:
            self.offset = tf.Print(self.offset,
                                   ['Forward RND module', self.offset])
        if self.rnd_size:
            self.space = 2 * self.padding - self.offset
            self.offset20 = tf.random_uniform(
                [], dtype='int32', maxval=self.space[0] * 2) - self.space[0]
            self.offset21 = tf.random_uniform(
                [], dtype='int32', maxval=self.space[1] * 2) - self.space[1]
            self.offset2 = tf.pack([self.offset20, self.offset21])
        else:
            self.offset2 = tf.zeros([2], dtype='int32')
        pass
Project: tf-image-interpreter    Author: ThoughtWorksInc    | Project Source | File Source
def _bbox_transform(self, ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = tf.log(gt_widths / ex_widths)
    targets_dh = tf.log(gt_heights / ex_heights)

    targets = tf.transpose(tf.pack(
      (targets_dx, targets_dy, targets_dw, targets_dh),
      axis=0
    ))
    return targets
Project: tf-image-interpreter    Author: ThoughtWorksInc    | Project Source | File Source
def _clip_boxes(self, boxes, image):
    height = tf.shape(image)[1]
    width = tf.shape(image)[2]
    # TODO: what will TF do with tensors that are no longer used?
    x1_over_0 = tf.reshape(tf.maximum(tf.minimum(boxes[:, 0::4], tf.cast(width - 1, tf.float32)), 0), (-1,))
    y1_over_0 = tf.reshape(tf.maximum(tf.minimum(boxes[:, 1::4], tf.cast(height - 1, tf.float32)), 0), (-1,))
    x2_below_width = tf.reshape(tf.maximum(tf.minimum(boxes[:, 2::4], tf.cast(width - 1, tf.float32)), 0), (-1,))
    y2_below_height = tf.reshape(tf.maximum(tf.minimum(boxes[:, 3::4], tf.cast(height - 1, tf.float32)), 0), (-1,))
    boxes = tf.pack(
      [x1_over_0,  # x1 >= 0
       y1_over_0,  # y1 >= 0
       x2_below_width,  # x2 < im_shape[1]
       y2_below_height],  # y2 < im_shape[0]
      axis=1
    )
    return boxes

  # bbox_transform_inv
Project: how_to_convert_text_to_images    Author: llSourcell    | Project Source | File Source
def __call__(self, input_layer, output_shape,
                 k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
                 name="deconv2d"):
        output_shape[0] = input_layer.shape[0]
        ts_output_shape = tf.pack(output_shape)
        with tf.variable_scope(name):
            # filter : [height, width, output_channels, in_channels]
            w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
                              init=tf.random_normal_initializer(stddev=stddev))

            try:
                deconv = tf.nn.conv2d_transpose(input_layer, w,
                                                output_shape=ts_output_shape,
                                                strides=[1, d_h, d_w, 1])

            # Support for versions of TensorFlow before 0.7.0
            except AttributeError:
                deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
                                        strides=[1, d_h, d_w, 1])

            # biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
            # deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
            deconv = tf.reshape(deconv, [-1] + output_shape[1:])

            return deconv
Project: how_to_convert_text_to_images    Author: llSourcell    | Project Source | File Source
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
        shape = input_layer.shape
        input_ = input_layer.tensor
        try:
            if len(shape) == 4:
                input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
                input_.set_shape([None, np.prod(shape[1:])])
                shape = input_.get_shape().as_list()

            with tf.variable_scope(scope or "Linear"):
                matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                       init=tf.random_normal_initializer(stddev=stddev))
                bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
                return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
        except Exception:
            import ipdb; ipdb.set_trace()
Project: TI-pooling    Author: dlaptev    | Project Source | File Source
def define_model(x,
                 keep_prob,
                 number_of_classes,
                 number_of_filters,
                 number_of_fc_features):
  splitted = tf.unpack(x, axis=4)
  branches = []
  with tf.variable_scope('branches') as scope:  
    for index, tensor_slice in enumerate(splitted):
      branches.append(single_branch(splitted[index],
                      number_of_filters,
                      number_of_fc_features))
      if (index == 0):
        scope.reuse_variables()
    concatenated = tf.pack(branches, axis=2)
    ti_pooled = tf.reduce_max(concatenated, reduction_indices=[2])
    drop = tf.nn.dropout(ti_pooled, keep_prob)
  with tf.variable_scope('fc2'):
    logits = fc(drop,
                [number_of_fc_features, number_of_classes],
                [number_of_classes])
  return logits
Project: Face-Pose-Net    Author: fengju514    | Project Source | File Source
def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
      # This should be equivalent to:
      #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
      #                         np.linspace(-1, 1, height))
      #  ones = np.ones(np.prod(x_t.shape))
      #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
      x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
      y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))

      x_t_flat = tf.reshape(x_t, (1, -1))
      y_t_flat = tf.reshape(y_t, (1, -1))

      ones = tf.ones_like(x_t_flat)
      grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
      return grid
Project: answer-triggering    Author: jiez-osu    | Project Source | File Source
def bag_hinge_loss(config, preds, sent_mask, flip_sent_mask, hete_mask,
                   sent_trgt, sent_num):
  """ HINGE LOSS:
      DEFINED AS: MAX(0, M - MIN(SENT+) + MAX(SENT-))
      THIS ONLY APPLIES TO HETE BAGS.
  """
  flip_sent_trgt = \
      tf.constant(1, shape=[config.batch_size,sent_num], dtype=config.data_type) - \
      sent_trgt
  pos_preds = preds + flip_sent_trgt + flip_sent_mask # [batch_size, sent_num]
  neg_preds = preds * flip_sent_trgt * sent_mask # [batch_size, sent_num]
  min_pos_pred = tf.reduce_min(pos_preds, 1)
  # min_pos_pred = tf.Print(min_pos_pred, [min_pos_pred], message='min_pos_pred')
  max_neg_pred = tf.reduce_max(neg_preds, 1)
  # max_neg_pred = tf.Print(max_neg_pred, [max_neg_pred], message='max_neg_pred')

  hinge_loss = hete_mask * tf.reduce_max(tf.pack(
      [tf.constant(0, shape=[config.batch_size], dtype=config.data_type),
       (0.20 - min_pos_pred + max_neg_pred)], axis=1), 1) # [batch_size]
  # hinge_loss = tf.Print(hinge_loss, [hinge_loss], message='hinge_loss', summarize=20)

  avg_hinge_loss = tf.reduce_sum(hinge_loss) / (tf.reduce_sum(hete_mask) + 1e-12)
  return avg_hinge_loss
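To make the margin concrete: with M = 0.20, a bag whose weakest positive sentence scores 0.6 and whose strongest negative scores 0.5 incurs max(0, 0.20 - 0.6 + 0.5) = 0.10; once every positive sentence outscores every negative by at least the margin, the hinge term is zero.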
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko    | Project Source | File Source
def process_image(img, scale, isotropic, crop, mean):
    '''Crops, scales, and normalizes the given image.
    scale : The image will first be scaled to this size.
            If isotropic is true, the smaller side is rescaled to this,
            preserving the aspect ratio.
    crop  : After scaling, a central crop of this size is taken.
    mean  : Subtracted from the image
    '''
    # Rescale
    if isotropic:
        img_shape = tf.to_float(tf.shape(img)[:2])
        min_length = tf.minimum(img_shape[0], img_shape[1])
        new_shape = tf.to_int32((scale / min_length) * img_shape)
    else:
        new_shape = tf.pack([scale, scale])
    img = tf.image.resize_images(img, new_shape[0], new_shape[1])
    # Center crop
    # Use the slice workaround until crop_to_bounding_box supports deferred tensor shapes
    # See: https://github.com/tensorflow/tensorflow/issues/521
    offset = (new_shape - crop) / 2
    img = tf.slice(img, begin=tf.pack([offset[0], offset[1], 0]), size=tf.pack([crop, crop, -1]))
    # Mean subtraction
    return tf.to_float(img) - mean
Project: third_person_im    Author: bstadie    | Project Source | File Source
def get_output_for(self, input, **kwargs):
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        n_steps = input_shape[1]
        input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
        if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
            h0s = kwargs['recurrent_state'][self]
        else:
            h0s = tf.tile(
                tf.reshape(self.h0, (1, self.num_units)),
                (n_batches, 1)
            )
        # flatten extra dimensions
        shuffled_input = tf.transpose(input, (1, 0, 2))
        hs = tf.scan(
            self.step,
            elems=shuffled_input,
            initializer=h0s
        )
        shuffled_hs = tf.transpose(hs, (1, 0, 2))
        if 'recurrent_state_output' in kwargs:
            kwargs['recurrent_state_output'][self] = shuffled_hs
        return shuffled_hs
Project: third_person_im    Author: bstadie    | Project Source | File Source
def get_output_for(self, input, **kwargs):
        input_shape = tf.shape(input)
        n_batches = input_shape[0]
        n_steps = input_shape[1]
        input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
        c0s = tf.tile(
            tf.reshape(self.c0, (1, self.num_units)),
            (n_batches, 1)
        )
        h0s = self.nonlinearity(c0s)
        # flatten extra dimensions
        shuffled_input = tf.transpose(input, (1, 0, 2))
        hcs = tf.scan(
            self.step,
            elems=shuffled_input,
            initializer=tf.concat(1, [h0s, c0s])
        )
        shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
        shuffled_hs = shuffled_hcs[:, :, :self.num_units]
        shuffled_cs = shuffled_hcs[:, :, self.num_units:]
        return shuffled_hs
Project: third_person_im    Author: bstadie    | Project Source | File Source
def dist_info_sym(self, obs_var, state_info_vars):
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, -1]))
        obs_var = tf.cast(obs_var, tf.float32)
        if self.state_include_action:
            prev_action_var = tf.cast(state_info_vars["prev_action"], tf.float32)
            all_input_var = tf.concat(2, [obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var}
                )
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
                )
            )
Project: third_person_im    Author: bstadie    | Project Source | File Source
def dist_info_sym(self, obs_var, state_info_vars):
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.pack([n_batches, n_steps, -1]))
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = tf.concat(2, [obs_var, prev_action_var])
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var}
            )
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
            )
        return dict(mean=means, log_std=log_stds)
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab    | Project Source | File Source
def batch_gather(reference, indices):
        '''Batchwise gathering of row indices.

        The numpy equivalent is reference[np.arange(batch_size), indices].

        # Arguments
            reference: tensor with ndim >= 2 of shape
              (batch_size, dim1, dim2, ..., dimN)
            indices: 1d integer tensor of shape (batch_size) satisfying
              0 <= i < dim1 for each element i.

        # Returns
            A tensor with shape (batch_size, dim2, ..., dimN)
            equal to reference[np.arange(batch_size), indices]
        '''
        batch_size = K.shape(reference)[0]
        indices = tf.pack([tf.range(batch_size), indices], axis=1)
        return tf.gather_nd(reference, indices)
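For concreteness, a pure-TensorFlow sketch of the indexing batch_gather performs, with a 2-D reference and constants chosen here for illustration:

import tensorflow as tf

reference = tf.constant([[10, 11, 12],
                         [20, 21, 22]])  # [batch_size=2, dim1=3]
row_indices = tf.constant([2, 0])        # one index per batch element
# Pair each batch index with its gathered index: [[0, 2], [1, 0]].
nd_indices = tf.pack([tf.range(tf.shape(reference)[0]), row_indices], axis=1)
gathered = tf.gather_nd(reference, nd_indices)

with tf.Session() as sess:
    print(sess.run(gathered))            # [12 20]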
Project: dwt    Author: min2209    | Project Source | File Source
def _upscore_layer(self, bottom, shape, params):
        strides = [1, params["stride"], params["stride"], 1]
        with tf.variable_scope(params["name"]):
            in_features = bottom.get_shape()[3].value

            if shape is None:
                in_shape = tf.shape(bottom)

                h = ((in_shape[1] - 1) * params["stride"]) + 1
                w = ((in_shape[2] - 1) * params["stride"]) + 1
                new_shape = [in_shape[0], h, w, params["outputChannels"]]
            else:
                new_shape = [shape[0], shape[1], shape[2], params["outputChannels"]]
                output_shape = tf.pack(new_shape)

                f_shape = [params["ksize"], params["ksize"], params["outputChannels"], in_features]

                weights = self.get_deconv_filter(f_shape, params)
                deconv = tf.nn.conv2d_transpose(bottom, weights, output_shape,
                                                strides=strides, padding='SAME')
        return deconv
Project: tensorflow-deeplab-lfov    Author: DrSleep    | Project Source | File Source
def loss(self, img_batch, label_batch):
        """Create the network, run inference on the input batch and compute loss.

        Args:
          input_batch: batch of pre-processed images.

        Returns:
          Pixel-wise softmax loss.
        """
        raw_output = self._create_network(tf.cast(img_batch, tf.float32), keep_prob=tf.constant(0.5))
        prediction = tf.reshape(raw_output, [-1, n_classes])

        # Need to resize labels and convert using one-hot encoding.
        label_batch = self.prepare_label(label_batch, tf.pack(raw_output.get_shape()[1:3]))
        gt = tf.reshape(label_batch, [-1, n_classes])

        # Pixel-wise softmax loss.
        loss = tf.nn.softmax_cross_entropy_with_logits(prediction, gt)
        reduced_loss = tf.reduce_mean(loss)

        return reduced_loss
Project: automatic-portrait-tf    Author: Corea    | Project Source | File Source
def build_deconv(self, layer_name, input_layer, shape, strides,
                     padding='VALID'):
        weight_name = layer_name + '_weight'
        weight = tf.get_variable(weight_name, shape=shape)
        self.net[weight_name] = weight

        b = tf.shape(input_layer)[0]
        h = tf.shape(input_layer)[1]
        w = tf.shape(input_layer)[2]
        d = tf.shape(input_layer)[3]
        _, sh, sw, _ = strides
        kh, kw, _, _ = shape

        output_shape = tf.pack([b, sh * (h - 1) + kh, sw * (w - 1) + kw, d])

        self.net[layer_name] = tf.nn.conv2d_transpose(
            input_layer, weight, output_shape, strides=strides,
            padding=padding, name=layer_name)
Project: StackGAN    Author: hanzhanggit    | Project Source | File Source
def __call__(self, input_layer, output_shape,
                 k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
                 name="deconv2d"):
        output_shape[0] = input_layer.shape[0]
        ts_output_shape = tf.pack(output_shape)
        with tf.variable_scope(name):
            # filter : [height, width, output_channels, in_channels]
            w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
                              init=tf.random_normal_initializer(stddev=stddev))

            try:
                deconv = tf.nn.conv2d_transpose(input_layer, w,
                                                output_shape=ts_output_shape,
                                                strides=[1, d_h, d_w, 1])

            # Support for versions of TensorFlow before 0.7.0
            except AttributeError:
                deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
                                        strides=[1, d_h, d_w, 1])

            # biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
            # deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
            deconv = tf.reshape(deconv, [-1] + output_shape[1:])

            return deconv
Project: StackGAN    Author: hanzhanggit    | Project Source | File Source
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
        shape = input_layer.shape
        input_ = input_layer.tensor
        try:
            if len(shape) == 4:
                input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
                input_.set_shape([None, np.prod(shape[1:])])
                shape = input_.get_shape().as_list()

            with tf.variable_scope(scope or "Linear"):
                matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                       init=tf.random_normal_initializer(stddev=stddev))
                bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
                return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
        except Exception:
            import ipdb; ipdb.set_trace()
Project: text-classification2    Author: yuhui-lin    | Project Source | File Source
def _reverse_seq(input_seq, lengths):
    """Reverse a list of Tensors up to specified lengths.
    Args:
        input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
        lengths:   A tensor of dimension batch_size, containing lengths for each
                   sequence in the batch. If "None" is specified, simply reverses
                   the list.
    Returns:
        time-reversed sequence
    """
    for input_ in input_seq:
        input_.set_shape(input_.get_shape().with_rank(2))

    # Join into (time, batch_size, depth)
    s_joined = array_ops_.pack(input_seq)

    # Reverse along dimension 0
    s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops_.unpack(s_reversed)
    return result
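Assuming array_ops_ above is an alias of TensorFlow's internal array_ops module, the public tf.reverse_sequence call behaves the same; a small sketch of the per-sequence reversal on a packed (time, batch, depth) tensor, with values chosen here for illustration:

import tensorflow as tf

# Two time-major sequences of max length 3: batch 0 has length 2, batch 1 has length 3.
s_joined = tf.constant([[[1.], [10.]],
                        [[2.], [20.]],
                        [[3.], [30.]]])  # shape (time=3, batch=2, depth=1)
lengths = tf.constant([2, 3], dtype=tf.int64)
# Reverse along time (dim 0) per batch element (dim 1), only up to each length.
s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)

with tf.Session() as sess:
    out = sess.run(s_reversed)
    print(out[:, 0, 0])  # [2. 1. 3.] -- only the first two steps are reversed
    print(out[:, 1, 0])  # [30. 20. 10.]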
Project: Adversarial_Video_Generation    Author: dyelax    | Project Source | File Source
def lp_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the sum of lp losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2, for l1 and l2 loss respectively.

    @return: The lp loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        scale_losses.append(tf.reduce_sum(tf.abs(gen_frames[i] - gt_frames[i])**l_num))

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses))
Project: Adversarial_Video_Generation    Author: dyelax    | Project Source | File Source
def adv_loss(preds, labels):
    """
    Calculates the sum of BCE losses between the predicted classifications and true labels.

    @param preds: The predicted classifications at each scale.
    @param labels: The true labels. (Same for every scale).

    @return: The adversarial loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(preds)):
        loss = bce_loss(preds[i], labels)
        scale_losses.append(loss)

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses))
Project: tensorflow-forward-ad    Author: renmengye    | Project Source | File Source
def Pack_FwGrad(*args, **kwargs):
  dx = args[1:]
  axis = kwargs["axis"]
  if all(map(lambda x: x is None, dx)):
    log.error("hey")
    return None
  else:
    ### Here we need to fill in zeros.
    def _mapper(_):
      dx = _[0]
      x = _[1]
      return dx if dx is not None else tf.zeros_like(x)

    dx = list(map(_mapper, zip(dx, list(args[0].inputs))))
    if tf.__version__.startswith("0"):
      return tf.pack(dx, axis=axis)
    else:
      return tf.stack(dx, axis=axis)
Project: piecewisecrf    Author: Vaan5    | Project Source | File Source
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
Project: terngrad    Author: wenwei202    | Project Source | File Source
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
Project: EEGSignalAnalysis    Author: pprakhar30    | Project Source | File Source
def Get_Pre_Trained_Weights(input_vars,name):
    with open("vgg16.tfmodel", mode='rb') as f:
        fileContent = f.read()

    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)
    images = tf.placeholder(tf.float32,shape = (None, 64, 64, 3),name=name)
    tf.import_graph_def(graph_def, input_map={ "images": images })
    print "graph loaded from disk"

    graph = tf.get_default_graph()
    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        #batch = np.reshape(input_vars,(-1, 224, 224, 3))
        n_timewin = 7   
        convnets = []
        for i in xrange(n_timewin):
            feed_dict = { images:input_vars[:,i,:,:,:] }
            pool_tensor = graph.get_tensor_by_name("import/pool5:0")
            pool_tensor = sess.run(pool_tensor, feed_dict=feed_dict)
            convnets.append(tf.contrib.layers.flatten(pool_tensor))
        convpool = tf.pack(convnets, axis = 1)
        return convpool
Project: rec-attend-public    Author: renmengye    | Project Source | File Source
def build_skip_conn_attn(cnn_channels, h_cnn_time, x_time, timespan):
  """Build skip connection for attention based model."""
  skip = [None]
  skip_ch = [0]
  nlayers = len(h_cnn_time[0])
  timespan = len(h_cnn_time)
  for jj in range(nlayers):
    lidx = nlayers - jj - 2
    if lidx >= 0:
      ll = [h_cnn_time[tt][lidx] for tt in range(timespan)]
    else:
      ll = x_time
    layer = tf.concat(1, [tf.expand_dims(l, 1) for l in ll])
    ss = tf.shape(layer)
    layer = tf.reshape(layer, tf.pack([-1, ss[2], ss[3], ss[4]]))
    skip.append(layer)
    ch_idx = lidx + 1
    skip_ch.append(cnn_channels[ch_idx])
  return skip, skip_ch
Project: cnn_picture_gazebo    Author: liuyandong1988    | Project Source | File Source
def decode_from_tfrecords(filename,num_epoch=None):
    filename_queue=tf.train.string_input_producer([filename],num_epochs=num_epoch)
    reader=tf.TFRecordReader()
    _,serialized=reader.read(filename_queue)
    example=tf.parse_single_example(serialized,features={
        'height':tf.FixedLenFeature([],tf.int64),
        'width':tf.FixedLenFeature([],tf.int64),
        'nchannel':tf.FixedLenFeature([],tf.int64),
        'image':tf.FixedLenFeature([],tf.string),
        'label':tf.FixedLenFeature([],tf.int64)
    })
    label=tf.cast(example['label'], tf.int32)
    image=tf.decode_raw(example['image'],tf.uint8)
    image=tf.reshape(image,tf.pack([
        tf.cast(example['height'], tf.int32),
        tf.cast(example['width'], tf.int32),
        tf.cast(example['nchannel'], tf.int32)]))
    return image,label
Project: ultrasound-nerve-segmentation-in-tensorflow    Author: loliverhennigh    | Project Source | File Source
def transpose_conv_layer(inputs, kernel_size, stride, num_features, idx, nonlinearity=None):
  with tf.variable_scope('{0}_trans_conv'.format(idx)) as scope:
    input_channels = int(inputs.get_shape()[3])

    weights = _variable('weights', shape=[kernel_size,kernel_size,num_features,input_channels],initializer=tf.contrib.layers.xavier_initializer_conv2d())
    biases = _variable('biases',[num_features],initializer=tf.contrib.layers.xavier_initializer_conv2d())
    batch_size = tf.shape(inputs)[0]
    output_shape = tf.pack([tf.shape(inputs)[0], tf.shape(inputs)[1]*stride, tf.shape(inputs)[2]*stride, num_features]) 
    conv = tf.nn.conv2d_transpose(inputs, weights, output_shape, strides=[1,stride,stride,1], padding='SAME')
    conv_biased = tf.nn.bias_add(conv, biases)
    if nonlinearity is not None:
      conv_biased = nonlinearity(conv_biased)

    #reshape
    shape = int_shape(inputs)
    conv_biased = tf.reshape(conv_biased, [shape[0], shape[1]*stride, shape[2]*stride, num_features])

    return conv_biased
Project: rltools    Author: sisl    | Project Source | File Source
def __init__(self, input_, outdim=2, debug=False):
        assert outdim >= 1
        self._outdim = outdim
        input_shape = tuple(input_.get_shape().as_list())
        to_flatten = input_shape[self._outdim - 1:]
        if any(s is None for s in to_flatten):
            flattened = None
        else:
            flattened = int(np.prod(to_flatten))

        self._output_shape = input_shape[1:self._outdim - 1] + (flattened,)
        if debug:
            util.header('Flatten(new_shape=%s)' % str(self._output_shape))
        pre_shape = tf.shape(input_)[:self._outdim - 1:]
        to_flatten = tf.reduce_prod(tf.shape(input_)[self._outdim - 1:])
        self._output = tf.reshape(input_, tf.concat(0, [pre_shape, tf.pack([to_flatten])]))
Project: rltools    Author: sisl    | Project Source | File Source
def _make_actiondist_ops(self, obs_B_H_Df):
        B = tf.shape(obs_B_H_Df)[0]
        H = tf.shape(obs_B_H_Df)[1]
        flatobs_B_H_Df = tf.reshape(obs_B_H_Df, tf.pack([B, H, -1]))
        if self.state_include_action:
            net_in = tf.concat(2, [flatobs_B_H_Df, self._prev_actions_B_H_Da])
            net_shape = (np.prod(self.observation_space.shape) + self.action_space.n,)
        else:
            net_in = flatobs_B_H_Df
            net_shape = (np.prod(self.observation_space.shape),)
        with tf.variable_scope('net'):
            net = nn.GRUNet(net_in, net_shape, self.action_space.n, self.hidden_spec)

        # XXX
        self.hidden_dim = net._hidden_dim

        scores_B_H_Pa = net.output
        actiondist_B_H_Pa = scores_B_H_Pa - tfutil.logsumexp(scores_B_H_Pa, axis=2)

        compute_step_prob = tfutil.function([net.step_input, net.step_prev_hidden],
                                            [net.step_output, net.step_hidden])
        return actiondist_B_H_Pa, net.step_input, compute_step_prob, net.hid_init
Project: gmn    Author: sbos    | Project Source | File Source
def importance_weights(self, cache):
        gen_ll = {}
        rec_ll = {}

        # w[t][i] -- likelihood ratio for the i-th object after t objects have been seen
        w = [0.] * episode_length

        for i in xrange(episode_length):
            scg.likelihood(self.z[i], cache, rec_ll)
            scg.likelihood(self.x[i], cache, gen_ll)

            w[i] = gen_ll[VAE.observed_name(i)] + gen_ll[VAE.hidden_name(i)] - rec_ll[VAE.hidden_name(i)]

        w = tf.pack(w)

        return w, [gen_ll, rec_ll]
Project: trpo    Author: jjkke88    | Project Source | File Source
def __init__(self, scope):
        with tf.variable_scope("%s_shared" % scope):
            self.obs = obs = tf.placeholder(
                tf.float32, shape=[None, pms.obs_shape], name="%s_obs"%scope)
            self.action_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape], name="%s_action"%scope)
            self.advant = tf.placeholder(tf.float32, shape=[None], name="%s_advant"%scope)
            self.old_dist_means_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                                   name="%s_oldaction_dist_means"%scope)
            self.old_dist_logstds_n = tf.placeholder(tf.float32, shape=[None, pms.action_shape],
                                                     name="%s_oldaction_dist_logstds"%scope)
            self.action_dist_means_n = (pt.wrap(self.obs).
                                        fully_connected(64, activation_fn=tf.nn.relu, init=tf.random_normal_initializer(-0.05, 0.05), bias_init=tf.constant_initializer(0),
                                                        name="%s_fc1"%scope).
                                        fully_connected(64, activation_fn=tf.nn.relu, init=tf.random_normal_initializer(-0.05, 0.05), bias_init=tf.constant_initializer(0),
                                                         name="%s_fc2"%scope).
                                        fully_connected(pms.action_shape, init=tf.random_normal_initializer(-0.05, 0.05), bias_init=tf.constant_initializer(0),
                                                        name="%s_fc3"%scope))

            self.N = tf.shape(obs)[0]
            Nf = tf.cast(self.N, tf.float32)
            self.action_dist_logstd_param = tf.Variable((.01*np.random.randn(1, pms.action_shape)).astype(np.float32), name="%spolicy_logstd"%scope)
            self.action_dist_logstds_n = tf.tile(self.action_dist_logstd_param,
                                              tf.pack((tf.shape(self.action_dist_means_n)[0], 1)))
            self.var_list = [v for v in tf.trainable_variables()if v.name.startswith(scope)]
Project: keras-mdn    Author: yanji84    | Project Source | File Source
def get_mixture_coef(output, KMIX=24, OUTPUTDIM=1):
  out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX*OUTPUTDIM], name="mixparam")
  splits = tf.split(1, 2 + OUTPUTDIM, output)
  out_pi = splits[0]
  out_sigma = splits[1]
  out_mu = tf.pack(splits[2:], axis=2)
  out_mu = tf.transpose(out_mu, [1,0,2])
  # use softmax to normalize pi into prob distribution
  max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
  out_pi = tf.sub(out_pi, max_pi)
  out_pi = tf.exp(out_pi)
  normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))
  out_pi = tf.mul(normalize_pi, out_pi)
  # use exponential to make sure sigma is positive
  out_sigma = tf.exp(out_sigma)
  return out_pi, out_sigma, out_mu
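The five lines normalizing out_pi implement a numerically stable softmax: pi_k = exp(o_k - max_j o_j) / sum_j exp(o_j - max_j o_j). Subtracting the row maximum before exponentiating keeps the exponentials from overflowing, and the shift cancels between numerator and denominator, so the result equals the ordinary softmax.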
Project: tefla    Author: openAGI    | Project Source | File Source
def one_hot(labels, num_classes, name='one_hot'):
    """Transform numeric labels into onehot_labels.
    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        name: Optional name for op_scope.
    Returns:
        one hot encoding of the labels.
    """
    with tf.op_scope([labels], name):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
Project: deep-makeover    Author: david-gpu    | Project Source | File Source
def distort_image(image):
    """Perform random distortions to the given 4D image and return result"""

    # Switch to 3D as that's what these operations require
    slices = tf.unpack(image)
    output = []

    # Perform pixel-wise distortions
    for image in slices:
        image  = tf.image.random_flip_left_right(image)
        image  = tf.image.random_saturation(image, .2, 2.)
        image += tf.truncated_normal(image.get_shape(), stddev=.05)
        image  = tf.image.random_contrast(image, .85, 1.15)
        image  = tf.image.random_brightness(image, .3)

        output.append(image)

    # Go back to 4D
    image   = tf.pack(output)

    return image
Project: ste-GAN-ography2    Author: bin2415    | Project Source | File Source
def image_processing_layer(self, X):
        K = 1 / 12. * tf.constant(
            [
                [-1, 2, -2, 2, -1],
                [2, -6, 8, -6, 2],
                [-2, 8, -12, 8, -2],
                [2, -6, 8, -6, 2],
                [-1, 2, -2, 2, -1]
            ], dtype= tf.float32
        )
        #kernel = tf.pack([K, K, K])
        #kernel = tf.pack([kernel, kernel, kernel])
        kernel = tf.stack([K, K, K])
        kernel = tf.stack([kernel, kernel, kernel])

        return tf.nn.conv2d(X, tf.transpose(kernel, [2, 3, 0, 1]), [1, 1, 1, 1], padding='SAME')
Project: knowledgeflow    Author: 3rduncle    | Project Source | File Source
def buildAttention(self):
        q_relu = self.tensors['q_relu']
        a_relu = self.tensors['a_relu']
        with tf.name_scope("attention"):
            W = identity([self.params['nb_filter'], self.params['nb_filter']], name='W')
            batch = tf.shape(q_relu)[0]
            q_matmul = tf.batch_matmul(q_relu, tf.tile(tf.expand_dims(W,[0]), tf.pack([batch, tf.constant(1), tf.constant(1)])))
            qa_attention = tf.batch_matmul(q_matmul, a_relu, adj_x=False, adj_y=True, name="attention")
            # shape = (batch, q_length, 1)
            qa_attention = tf.tanh(qa_attention)
            q_max = tf.reduce_max(qa_attention, reduction_indices=[2], keep_dims=True, name='q_max')
            # shape = (batch, 1, a_length)
            a_max = tf.reduce_max(qa_attention, reduction_indices=[1], keep_dims=True, name='a_max')
            # shape = (batch, q_length, 1)
            q_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(q_max, [2])), -1)
            # shape = (batch, a_length, 1)
            a_softmax = tf.expand_dims(tf.nn.softmax(tf.squeeze(a_max, [1])), -1)
            # https://www.tensorflow.org/versions/r0.9/api_docs/python/math_ops.html#batch_matmul 
            # shape = (batch, NUM_FILTERS, 1)
            q_feature = tf.batch_matmul(q_relu, q_softmax, adj_x=True, adj_y=False)
            a_feature = tf.batch_matmul(a_relu, a_softmax, adj_x=True, adj_y=False)
        self.tensors['q_feature'] = q_feature
        self.tensors['a_feature'] = a_feature
        self.tensors.setdefault('weights', []).append(W)
Project: bnn-analysis    Author: myshkov    | Project Source | File Source
def model_chain_from_position(cls, chains_num, layer_descriptions, position_tensor, input_tensor):
        """ Creates multiple-chain model from the specified position and description. """
        positions = tf.split(0, chains_num, position_tensor)

        m = []
        for i in range(chains_num):
            m.append(cls.model_from_position(layer_descriptions, positions[i], input_tensor))

        models = tf.pack(m)

        return models
Project: lung-cancer-detector    Author: YichenGong    | Project Source | File Source
def glimpseSensor(normalLocation, inputPlaceholder):
    location = tf.round(tf.multiply((normalLocation + 1)/2.0, InputImageSize))
    location = tf.cast(location, tf.int32)

    images = tf.reshape(inputPlaceholder, (batchSize, InputImageSize[0], 
                                          InputImageSize[1], 
                                          InputImageSize[2]))

    zooms = []
    for k in xrange(batchSize):
        imgZooms = []
        img = images[k]

        loc = location[k]

        for i in xrange(glimpseDepth):
            radius = int(glimpseRadius * (2 ** i))
            glimpse = getGlipmse(img, loc, radius)
            glimpse = tf.reshape(glimpse, (glimpseBandwidth, glimpseBandwidth, glimpseBandwidth))

            imgZooms.append(glimpse)

        zooms.append(tf.pack(imgZooms))

    zooms = tf.pack(zooms)

    return zooms
Project: tfplus    Author: renmengye    | Project Source | File Source
def random_flip_left_right(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack(
        [1.0, 1.0, uniform_random, 1.0]), 0.5)
    return tf.reverse(image, mirror)
Project: tfplus    Author: renmengye    | Project Source | File Source
def random_flip_up_down(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack(
        [1.0, uniform_random, 1.0, 1.0]), 0.5)
    return tf.reverse(image, mirror)
Project: tf-image-interpreter    Author: ThoughtWorksInc    | Project Source | File Source
def _combine_box_and_delta(self, bboxes, deltas):
    widths = bboxes[:, 2] - bboxes[:, 0] + 1.0
    heights = bboxes[:, 3] - bboxes[:, 1] + 1.0
    ctr_x = bboxes[:, 0] + 0.5 * widths
    ctr_y = bboxes[:, 1] + 0.5 * heights

    # use 0::4 to slice each coordinate as a [-1, 1] column matrix, since deltas has 4 columns per box
    dx = deltas[:, 0::4]
    dy = deltas[:, 1::4]
    dw = deltas[:, 2::4]
    dh = deltas[:, 3::4]

    # do not understand the transformation
    pred_ctr_x = tf.reshape(dx * widths[:, tf.newaxis] + ctr_x[:, tf.newaxis], (-1,))
    pred_ctr_y = tf.reshape(dy * heights[:, tf.newaxis] + ctr_y[:, tf.newaxis], (-1,))
    pred_w = tf.reshape(tf.exp(dw) * widths[:, tf.newaxis], (-1,))
    pred_h = tf.reshape(tf.exp(dh) * heights[:, tf.newaxis], (-1,))

    pred_boxes = tf.pack(
      [pred_ctr_x - 0.5 * pred_w,
       pred_ctr_y - 0.5 * pred_h,
       pred_ctr_x + 0.5 * pred_w,
       pred_ctr_y + 0.5 * pred_h],
      axis=1
    )

    return pred_boxes
Project: Face-Pose-Net    Author: fengju514    | Project Source | File Source
def _repeat(self, x, n_repeats):
    with tf.variable_scope('_repeat'):
      rep = tf.transpose(
        tf.expand_dims(tf.ones(shape=tf.pack([n_repeats, ])), 1), [1, 0])
      rep = tf.cast(rep, 'int32')
      x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
      return tf.reshape(x, [-1])
Project: Face-Pose-Net    Author: fengju514    | Project Source | File Source
def _transform(self, theta, input_dim, out_size, channel_input):
    with tf.variable_scope('_transform'):
      print input_dim.get_shape(), theta.get_shape(), out_size[0], out_size[1]
      num_batch = self.hps.batch_size #tf.shape(input_dim)[0]
      height = tf.shape(input_dim)[1]
      width = tf.shape(input_dim)[2]
      num_channels = tf.shape(input_dim)[3]
      theta = tf.reshape(theta, (-1, 2, 3))
      theta = tf.cast(theta, 'float32')

      # grid of (x_t, y_t, 1), eq (1) in ref [1]
      height_f = tf.cast(height, 'float32')
      width_f = tf.cast(width, 'float32')
      out_height = out_size[0]
      out_width = out_size[1]
      grid = self._meshgrid(out_height, out_width)
      #print grid, grid.get_shape()
      grid = tf.expand_dims(grid, 0)
      grid = tf.reshape(grid, [-1])
      grid = tf.tile(grid, tf.pack([num_batch]))
      grid = tf.reshape(grid, tf.pack([num_batch, 3, -1]))
      #print grid, grid.get_shape()

      # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
      T_g = tf.batch_matmul(theta, grid)
      x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
      y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
      x_s_flat = tf.reshape(x_s, [-1])
      y_s_flat = tf.reshape(y_s, [-1])
      #print x_s_flat.get_shape(), y_s_flat.get_shape()
      input_transformed = self._interpolate(input_dim, x_s_flat, y_s_flat, out_size, channel_input)
      #print input_transformed.get_shape()

      output = tf.reshape(input_transformed, tf.pack([num_batch, out_height, out_width, channel_input]))
      return output
      #return input_dim
Project: How-to-Learn-from-Little-Data    Author: llSourcell    | Project Source | File Source
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)

    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)

    def step_((accuracy, indices), (p, t)):
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""

        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ##Accuracy Update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices,tf.pack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.pack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ##Index Update
        index = tf.cast(tf.pack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
Project: tf_base    Author: ozansener    | Project Source | File Source
def process_single_image(img, scale, crop, mean):
    """
    Processing an image for zero-mean/std-dev fix etc
    """
    new_shape = tf.pack([scale, scale])
    img = tf.image.resize_images(img, new_shape[0], new_shape[1])
    offset = (new_shape - crop) / 2
    img = tf.slice(img, begin=tf.pack([offset[0], offset[1], 0]), size=tf.pack([crop, crop, -1]))
    return tf.to_float(img) - mean
Project: tf_classification    Author: visipedia    | Project Source | File Source
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                        tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
Project: tf_classification    Author: visipedia    | Project Source | File Source
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                        tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out