Python tensorflow.python.ops.array_ops module: reshape() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.reshape().
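Before the project snippets, a minimal sketch of the call itself may help; this assumes a TensorFlow 1.x graph-mode environment (the API generation these examples target), and the tensor values are illustrative only.

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape [2, 3]
flat = array_ops.reshape(x, [-1])        # shape [6]; -1 lets TF infer the size
back = array_ops.reshape(flat, [3, 2])   # shape [3, 2]

with tf.Session() as sess:
    print(sess.run(flat))  # [1 2 3 4 5 6]
    print(sess.run(back))  # [[1 2] [3 4] [5 6]]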

Project: opinatt    Author: epochx
def get_classification_loss(logits, targets, softmax_loss_function=None):
  bucket_outputs = logits
  if softmax_loss_function is None:
    assert len(bucket_outputs) == len(targets) == 1
    # We need to make the target an int64 tensor and set its shape.
    bucket_target = array_ops.reshape(math_ops.to_int64(targets[0]), [-1])
    crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(logits=bucket_outputs[0],
                                                               labels=bucket_target)
  else:
    assert len(bucket_outputs) == len(targets) == 1
    crossent = softmax_loss_function(bucket_outputs[0], targets[0])

  batch_size = array_ops.shape(targets[0])[0]
  loss = tf.reduce_sum(crossent) / math_ops.cast(batch_size, dtypes.float32)

  return loss
Project: LIE    Author: EmbraceLife
def _batch_shuffle(index_array, batch_size):
          """Shuffles an array in a batch-wise fashion.

          Useful for shuffling HDF5 arrays
          (where one cannot access arbitrary indices).

          Arguments:
              index_array: array of indices to be shuffled.
              batch_size: integer.

          Returns:
              The `index_array` array, shuffled in a batch-wise fashion.
          """
          batch_count = int(len(index_array) / batch_size)
          # to reshape we need to be cleanly divisible by batch size
          # we stash extra items and reappend them after shuffling
          last_batch = index_array[batch_count * batch_size:]
          index_array = index_array[:batch_count * batch_size]
          index_array = index_array.reshape((batch_count, batch_size))
          np.random.shuffle(index_array)
          index_array = index_array.flatten()
          return np.append(index_array, last_batch)
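A minimal NumPy-only usage sketch of the helper above; the seed and sizes are hypothetical.

import numpy as np

np.random.seed(0)
indices = np.arange(10)
shuffled = _batch_shuffle(indices, batch_size=3)
# The three complete batches covering indices 0..8 are shuffled as whole
# blocks (order inside each block is preserved); the leftover item 9 is
# re-appended at the end.
print(shuffled)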
Project: lsdc    Author: febert
def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for sfc, sv in zip(examples['sparse_features'], sparse_variables):
        # TODO(sibyl-Aix6ihai): following does not take care of missing features.
        result += math_ops.segment_sum(
            math_ops.mul(
                array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
            sfc.example_indices)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])

      for i in range(len(dense_variables)):
        result += math_ops.matmul(dense_features[i], array_ops.expand_dims(
            dense_variables[i], -1))

    # Reshaping to allow shape inference at graph construction time.
    return array_ops.reshape(result, [-1])
Project: lsdc    Author: febert
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=False):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes (or an iterable of predicted classes if
      as_iterable is True).
    """
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size, outputs=[_CLASSES],
                                    as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=_CLASSES)
    return preds[_CLASSES].reshape(-1)
Project: lsdc    Author: febert
def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    # Transform the input tensor according to the normalizer function + reshape.
    input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
    batch_size = input_tensor.get_shape().as_list()[0]
    batch_size = int(batch_size) if batch_size else -1
    flattened_shape = [batch_size, self.dimension]
    columns_to_tensors[self] = array_ops.reshape(
        math_ops.to_float(input_tensor), flattened_shape, name="reshape")

  # pylint: disable=unused-argument
Project: lsdc    Author: febert
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("Attention"):
      k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("AttnV", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
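For context, a hedged sketch of the reshape trick used above: the attention states get a dummy height-1 dimension so a 1x1 conv2d acts as a per-position linear map. Shapes are made up; TF 1.x graph mode is assumed.

import tensorflow as tf

attn_states = tf.zeros([8, 10, 128])                # [batch, attn_length, attn_size]
hidden = tf.reshape(attn_states, [-1, 10, 1, 128])  # insert a height-1 axis
k = tf.zeros([1, 1, 128, 64])                       # [1, 1, attn_size, attn_vec_size]
features = tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME")  # [8, 10, 1, 64]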
Project: lsdc    Author: febert
def _make_eval_func(self, tensors, session, feed_dict, fetches,
                      callback=None):
    """Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
    if not isinstance(tensors, list):
      tensors = [tensors]
    num_tensors = len(tensors)

    def eval_func(x):
      """Function to evaluate a `Tensor`."""
      augmented_feed_dict = {
          var: x[packing_slice].reshape(_get_shape_tuple(var))
          for var, packing_slice in zip(self._vars, self._packing_slices)
      }
      augmented_feed_dict.update(feed_dict)
      augmented_fetches = tensors + fetches

      augmented_fetch_vals = session.run(
          augmented_fetches, feed_dict=augmented_feed_dict)

      if callable(callback):
        callback(*augmented_fetch_vals[num_tensors:])

      return augmented_fetch_vals[:num_tensors]

    return eval_func
Project: lsdc    Author: febert
def __init__(self, label_name, weight_column_name, enable_centered_bias,
               head_name, thresholds):
    def loss_fn(logits, labels):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(labels), 2),
          ["labels shape should be either [batch_size, 1] or [batch_size]"])
      with ops.control_dependencies([check_shape_op]):
        labels = array_ops.reshape(
            labels, shape=[array_ops.shape(labels)[0], 1])
      return losses.hinge_loss(logits, labels)

    super(_BinarySvmHead, self).__init__(
        train_loss_fn=loss_fn,
        eval_loss_fn=loss_fn,
        n_classes=2,
        label_name=label_name,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name,
        thresholds=thresholds)
Project: lsdc    Author: febert
def _dense_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    outer_dimensions = array_ops.slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat(0, (outer_dimensions, [-1]))
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped
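A hedged sketch of the effect for a statically shaped input (shape hypothetical): the leading new_rank - 1 dimensions are kept and everything after them is merged into one.

import tensorflow as tf

x = tf.zeros([8, 4, 4, 3])
flattened = tf.reshape(x, [8, -1])  # new_rank=2: shape becomes [8, 48]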
Project: lsdc    Author: febert
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    a `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    predictions.set_shape(logits.get_shape())
    return predictions
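The same round-trip with public TF 1.x ops, on a hypothetical rank-3 logits tensor: flatten to 2-D, apply softmax, restore the original shape.

import tensorflow as tf

logits = tf.random_normal([2, 5, 7])     # classes on the last axis
logits_2d = tf.reshape(logits, [-1, 7])  # shape [10, 7]
predictions = tf.reshape(tf.nn.softmax(logits_2d), tf.shape(logits))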
Project: lsdc    Author: febert
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
Project: tensorflow-kr    Author: tensorflowkorea
def _zero_out_grad(op, grad):
  """The gradients for `zero_out`.

  Args:
    op: The `zero_out` `Operation` that we are differentiating, which we can use
      to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out` op.

  Returns:
    Gradients with respect to the input of `zero_out`.
  """
  to_zero = op.inputs[0]
  shape = array_ops.shape(to_zero)
  index = array_ops.zeros_like(shape)
  first_grad = array_ops.reshape(grad, [-1])[0]
  to_zero_grad = sparse_ops.sparse_to_dense(index, shape, first_grad, 0)
  return [to_zero_grad]  # List of one Tensor, since we have one input
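A NumPy analogue of the gradient above, with made-up values: only the first input element receives gradient, equal to the first element of the flattened upstream gradient.

import numpy as np

grad = np.array([0.5, 0.2, 0.1])
to_zero_grad = np.zeros_like(grad)
to_zero_grad[0] = grad.reshape(-1)[0]
print(to_zero_grad)  # [0.5 0.  0. ]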
Project: DL-Benchmarks    Author: DL-Benchmarks
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif vs.get_variable_scope().initializer:
            initializer = vs.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: odin    Author: imito
def call(self, inputs, state):
    """Long short-term memory cell with attention (LSTMA)."""
    state, attns, attn_states = state
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with tf.variable_scope("attn_output_projection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(
        [new_attn_states, array_ops.expand_dims(output, 1)], 1)
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    return output, new_state
Project: odin    Author: imito
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with tf.variable_scope("attention"):
      k = tf.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = tf.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
Project: seqGan_chatbot    Author: zpppy
def _argmax_or_mcsearch(embedding, output_projection=None, update_embedding=True, mc_search=False):
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])


        if isinstance(mc_search, bool):
            # tf.multinomial samples one symbol id from the `prev` logits;
            # reshape to [-1] flattens the samples to a 1-D batch of ids.
            prev_symbol = tf.reshape(tf.multinomial(prev, 1), [-1]) if mc_search else math_ops.argmax(prev, 1)
        else:
            prev_symbol = tf.cond(mc_search, lambda: tf.reshape(tf.multinomial(prev, 1), [-1]), lambda: tf.argmax(prev, 1))


        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        # Optionally block gradients from flowing back through the embedding.
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
Project: seqGan_chatbot    Author: zpppy
def sequence_loss_by_mle(logits, targets, vocab_size, sequence_length, batch_size, output_projection=None):
    #print("logits: ", np.shape(logits[0]))
    #logits: [seq_len, batch_size, emb_dim]
    #targets: [seq_len, batch_size]  =====transpose====> [batch_size, seq_len]
    # labels = tf.to_int32(tf.transpose(targets))
    #targets: [seq_len, batch_size] ====reshape[-1]====> [seq_len * batch_size]
    labels = tf.to_int32(tf.reshape(targets, [-1]))

    if output_projection is not None:
      #logits = nn_ops.xw_plus_b(logits, output_projection[0], output_projection[1])
      logits = [tf.matmul(logit, output_projection[0]) + output_projection[1] for logit in logits]

    reshape_logits = tf.reshape(logits, [-1, vocab_size]) #[seq_len * batch_size, vocab_size]

    prediction = tf.clip_by_value(reshape_logits, 1e-20, 1.0)

    pretrain_loss = -tf.reduce_sum(
        # [seq_len * batch_size , vocab_size]
        tf.one_hot(labels, vocab_size, 1.0, 0.0) * tf.log(prediction)
    ) / (sequence_length * batch_size)
    return pretrain_loss
Project: seq2seq_parser    Author: trangham283
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with tf.name_scope(name, "sequence_loss_by_example",
                     logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
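A NumPy sketch of the weighted averaging performed above, with hypothetical per-timestep cross-entropies for a batch of two; a weight of 0 masks padded steps.

import numpy as np

crossents = [np.array([2.0, 1.0]), np.array([0.5, 0.25])]  # one array per timestep
weights = [np.array([1.0, 1.0]), np.array([1.0, 0.0])]
log_perps = sum(c * w for c, w in zip(crossents, weights))
log_perps /= sum(weights) + 1e-12
print(log_perps)  # [1.25 1.  ]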
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: polyaxon    Author: polyaxon
def generate_image(self, image_format, image_shape):
        """Generates an image and an example containing the encoded image.

        Args:
            image_format: the encoding format of the image.
            image_shape: the shape of the image to generate.

        Returns:
            image: the generated image.
            example: a TF-example with a feature key 'image/encoded' set to the
                serialized image and a feature key 'image/format' set to the image
                encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
        """
        num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
        image = np.linspace(0, num_pixels - 1, num=num_pixels).reshape(image_shape).astype(np.uint8)
        tf_encoded = self._encode(image, image_format)
        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'image/encoded': self._encode_bytes_feature(tf_encoded),
            'image/format': self._string_feature(image_format)
        }))

        return image, example.SerializeToString()
Project: polyaxon    Author: polyaxon
def decode_example(self, serialized_example, item_handler, image_format):
        """Decodes the given serialized example with the specified item handler.

        Args:
            serialized_example: a serialized TF example string.
            item_handler: the item handler used to decode the image.
            image_format: the image format being decoded.

        Returns:
            the decoded image found in the serialized Example.
        """
        serialized_example = array_ops.reshape(serialized_example, shape=[])
        decoder = TFExampleDecoder(
            keys_to_features={
                'image/encoded': tf.FixedLenFeature((), dtypes.string, default_value=''),
                'image/format': tf.FixedLenFeature((), dtypes.string, default_value=image_format),
            },
            items_to_handlers={'image': item_handler})
        [tf_image] = decoder.decode(serialized_example, ['image'])
        return tf_image
Project: polyaxon    Author: polyaxon
def test_decode_example_with_string_tensor(self):
        tensor_shape = (2, 3, 1)
        np_array = np.array([[['ab'], ['cd'], ['ef']], [['ghi'], ['jkl'], ['mnop']]])

        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'labels': self._bytes_feature(np_array),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'labels': parsing_ops.FixedLenFeature(
                    tensor_shape, dtypes.string,
                    default_value=constant_op.constant('', shape=tensor_shape, dtype=dtypes.string))
            }
            items_to_handlers = {'labels': tfexample_decoder.Tensor('labels')}
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_labels] = decoder.decode(serialized_example, ['labels'])
            labels = tf_labels.eval()

            labels = labels.astype(np_array.dtype)
            self.assertTrue(np.array_equal(np_array, labels))
Project: polyaxon    Author: polyaxon
def test_decode_example_with_float_tensor(self):
        np_array = np.random.rand(2, 3, 1).astype('f')

        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'array': self._encode_float_feature(np_array),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.float32)
            }
            items_to_handlers = {'array': tfexample_decoder.Tensor('array'), }
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_array] = decoder.decode(serialized_example, ['array'])
            self.assertAllEqual(tf_array.eval(), np_array)
Project: polyaxon    Author: polyaxon
def test_decode_example_with_int64_tensor(self):
        np_array = np.random.randint(1, 10, size=(2, 3, 1))

        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'array': self._encode_int64_feature(np_array),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'array': parsing_ops.FixedLenFeature(np_array.shape, dtypes.int64)
            }
            items_to_handlers = {'array': tfexample_decoder.Tensor('array'), }
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_array] = decoder.decode(serialized_example, ['array'])
            self.assertAllEqual(tf_array.eval(), np_array)
Project: polyaxon    Author: polyaxon
def test_decode_example_with_fix_len_tensor_with_shape(self):
        np_array = np.array([[1, 2, 3], [4, 5, 6]])

        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'labels': self._encode_int64_feature(np_array),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'labels': parsing_ops.FixedLenFeature(np_array.shape, dtype=dtypes.int64),
            }
            items_to_handlers = {
                'labels': tfexample_decoder.Tensor('labels', shape=np_array.shape),
            }
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_labels] = decoder.decode(serialized_example, ['labels'])
            labels = tf_labels.eval()
            self.assertAllEqual(labels, np_array)
Project: polyaxon    Author: polyaxon
def test_decode_example_with_var_len_tensor_to_dense(self):
        np_array = np.array([[1, 2, 3], [4, 5, 6]])
        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'labels': self._encode_int64_feature(np_array),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'labels': parsing_ops.VarLenFeature(dtype=dtypes.int64),
            }
            items_to_handlers = {
                'labels': tfexample_decoder.Tensor(
                    'labels', shape=np_array.shape),
            }
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_labels] = decoder.decode(serialized_example, ['labels'])
            labels = tf_labels.eval()
            self.assertAllEqual(labels, np_array)
Project: polyaxon    Author: polyaxon
def test_decode_example_with_sparse_tensor(self):
        np_indices = np.array([[1], [2], [5]])
        np_values = np.array([0.1, 0.2, 0.6]).astype('f')
        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'indices': self._encode_int64_feature(np_indices),
            'values': self._encode_float_feature(np_values),
        }))

        serialized_example = example.SerializeToString()

        with self.test_session():
            serialized_example = array_ops.reshape(serialized_example, shape=[])
            keys_to_features = {
                'indices': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                'values': parsing_ops.VarLenFeature(dtype=dtypes.float32),
            }
            items_to_handlers = {'labels': tfexample_decoder.SparseTensor()}
            decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
            [tf_labels] = decoder.decode(serialized_example, ['labels'])
            labels = tf_labels.eval()
            self.assertAllEqual(labels.indices, np_indices)
            self.assertAllEqual(labels.values, np_values)
            self.assertAllEqual(labels.dense_shape, np_values.shape)
Project: seq2seq    Author: google
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
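A NumPy sketch of the scatter-based recombination above (values hypothetical): positions that were sampled take freshly embedded inputs, the rest keep the base inputs.

import numpy as np

sample_ids = np.array([5, -1, 7, -1])
base_next_inputs = np.zeros((4, 3))
sampled_next_inputs = np.ones((2, 3))  # stand-in embeddings for ids 5 and 7
next_inputs = np.empty_like(base_next_inputs)
next_inputs[sample_ids > -1] = sampled_next_inputs
next_inputs[sample_ids <= -1] = base_next_inputs[sample_ids <= -1]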
Project: tensorflow_seq2seq_chatbot    Author: higepon
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps
Project: chatbot-generative    Author: DeeChat
def _inference(self):
        print('Creating inference...')
        # If we use sampled softmax, we need an output projection.
        # Sampled softmax only makes sense if we sample less than vocabulary size.
        if 0 < config.NUM_SAMPLES < self.vocab_size['decoder']:
            w_t = tf.get_variable('proj_w', [self.vocab_size['decoder'], config.HIDDEN_SIZE])
            # `config.HIDDEN_SIZE` is the same as embedding size
            w = tf.transpose(w_t)
            b = tf.get_variable('proj_b', [self.vocab_size['decoder']])
            self.output_projection = (w, b)

        def sampled_loss(labels, logits):
            labels = tf.reshape(labels, [-1, 1])
            local_w_t = tf.cast(w_t, tf.float32)
            local_b = tf.cast(b, tf.float32)
            local_inputs = tf.cast(logits, tf.float32)
            return tf.nn.sampled_softmax_loss(
                weights=local_w_t,
                biases=local_b,
                labels=labels,
                inputs=local_inputs,
                num_sampled=config.NUM_SAMPLES,
                num_classes=self.vocab_size['decoder'])

        self.softmax_loss_function = sampled_loss

        enc_cell = tf.contrib.rnn.GRUCell(config.HIDDEN_SIZE)

        self.enc_cell = tf.contrib.rnn.MultiRNNCell([enc_cell] * config.NUM_LAYERS)
        dec_cell = tf.contrib.rnn.GRUCell(config.HIDDEN_SIZE)
        self.dec_cell = tf.contrib.rnn.MultiRNNCell([dec_cell] * config.NUM_LAYERS)
Project: opinatt    Author: epochx
def get_sequence_loss(logits, targets, weights, softmax_loss_function=None, per_example_loss=False):

  if per_example_loss:
    assert len(logits) == len(targets)
    # We need to make each target an int64 tensor and set its shape.
    bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
    crossent = sequence_loss_by_example(logits, bucket_target, weights,
                                              softmax_loss_function=softmax_loss_function)
  else:
    assert len(logits) == len(targets)
    bucket_target = [array_ops.reshape(math_ops.to_int64(x), [-1]) for x in targets]
    crossent = sequence_loss_by_batch(logits, bucket_target, weights,
                                      softmax_loss_function=softmax_loss_function)

  return crossent
Project: conv_seq2seq    Author: tobyyouup
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Project: GPflow    Author: GPflow
def _pack(cls, tensors):
    """Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
    if not tensors:
      return None
    elif len(tensors) == 1:
      return array_ops.reshape(tensors[0], [-1])
    else:
      flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
      return array_ops.concat(flattened, 0)
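A hedged usage sketch of the same packing with public TF 1.x ops (tensor shapes made up): each tensor is flattened to rank 1, then the pieces are concatenated.

import tensorflow as tf

a = tf.zeros([2, 3])
b = tf.ones([4])
packed = tf.concat([tf.reshape(a, [-1]), tf.reshape(b, [-1])], 0)  # shape [10]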
Project: GPflow    Author: GPflow
def _make_eval_func(self, tensors, session, feed_dict, fetches,
                      callback=None):
    """Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
    if not isinstance(tensors, list):
      tensors = [tensors]
    num_tensors = len(tensors)

    def eval_func(x):
      """Function to evaluate a `Tensor`."""
      shapes = dict(zip(self._vars, self._var_shapes))
      augmented_feed_dict = {
          var: x[packing_slice].reshape(shapes[var])
          for var, packing_slice in zip(self._vars, self._packing_slices)
      }
      augmented_feed_dict.update(feed_dict)
      augmented_fetches = tensors + fetches

      augmented_fetch_vals = session.run(
          augmented_fetches, feed_dict=augmented_feed_dict)

      if callable(callback):
        callback(*augmented_fetch_vals[num_tensors:])

      return augmented_fetch_vals[:num_tensors]

    return eval_func
Project: LIE    Author: EmbraceLife
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
      """Computes mean and std for batch then apply batch_normalization on batch.

      Arguments:
          x: Input tensor or variable.
          gamma: Tensor by which to scale the input.
          beta: Tensor with which to center the input.
          reduction_axes: iterable of integers,
              axes over which to normalize.
          epsilon: Fuzz factor.

      Returns:
          A tuple length of 3, `(normalized_tensor, mean, variance)`.
      """
      mean, var = nn.moments(
          x, reduction_axes, shift=None, name=None, keep_dims=False)
      if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
        normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
      else:
        # need broadcasting
        target_shape = []
        for axis in range(ndim(x)):
          if axis in reduction_axes:
            target_shape.append(1)
          else:
            target_shape.append(array_ops.shape(x)[axis])
        target_shape = array_ops.stack(target_shape)

        broadcast_mean = array_ops.reshape(mean, target_shape)
        broadcast_var = array_ops.reshape(var, target_shape)
        if gamma is None:
          broadcast_gamma = None
        else:
          broadcast_gamma = array_ops.reshape(gamma, target_shape)
        if beta is None:
          broadcast_beta = None
        else:
          broadcast_beta = array_ops.reshape(beta, target_shape)
        normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                        broadcast_beta, broadcast_gamma, epsilon)
      return normed, mean, var
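A minimal sketch of the broadcasting branch with public TF 1.x ops, assuming NHWC input and per-channel statistics (shapes hypothetical): the stats are reshaped to rank 4 so batch_normalization can broadcast them across batch and spatial axes.

import tensorflow as tf

x = tf.zeros([8, 32, 32, 16])
mean, var = tf.nn.moments(x, axes=[0, 1, 2])  # shape [16]
normed = tf.nn.batch_normalization(
    x,
    tf.reshape(mean, [1, 1, 1, 16]),
    tf.reshape(var, [1, 1, 1, 16]),
    None, None, 1e-3)  # no beta/gamma offsets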
Project: LIE    Author: EmbraceLife
def reshape(x, shape):
      """Reshapes a tensor to the specified shape.

      Arguments:
          x: Tensor or variable.
          shape: Target shape tuple.

      Returns:
          A tensor.
      """
      return array_ops.reshape(x, shape)
Project: LIE    Author: EmbraceLife
def flatten(x):
      """Flatten a tensor.

      Arguments:
          x: A tensor or variable.

      Returns:
          A tensor, reshaped into 1-D.
      """
      return array_ops.reshape(x, [-1])
Project: LIE    Author: EmbraceLife
def batch_flatten(x):
      """Turn a nD tensor into a 2D tensor with same 0th dimension.

      In other words, it flattens each data samples of a batch.

      Arguments:
          x: A tensor or variable.

      Returns:
          A tensor.
      """
      x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
      return x
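For illustration, the same effect with public ops when the trailing dimensions are static (sizes made up): keep dimension 0 and merge the rest.

import tensorflow as tf

x = tf.zeros([32, 7, 7, 64])
flat = tf.reshape(x, [32, -1])  # shape [32, 3136]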
Project: LIE    Author: EmbraceLife
def sparse_categorical_crossentropy(output, target, from_logits=False):
      """Categorical crossentropy with integer targets.

      Arguments:
          output: A tensor resulting from a softmax
              (unless `from_logits` is True, in which
              case `output` is expected to be the logits).
          target: An integer tensor.
          from_logits: Boolean, whether `output` is the
              result of a softmax, or is a tensor of logits.

      Returns:
          Output tensor.
      """
      # Note: nn.softmax_cross_entropy_with_logits
      # expects logits, Keras expects probabilities.
      if not from_logits:
        epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
        output = clip_ops.clip_by_value(output, epsilon, 1 - epsilon)
        output = math_ops.log(output)

      output_shape = output.get_shape()
      targets = cast(flatten(target), 'int64')
      logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
      res = nn.sparse_softmax_cross_entropy_with_logits(
          labels=targets, logits=logits)
      if len(output_shape) == 3:
        # if our output includes timesteps we need to reshape
        return array_ops.reshape(res, array_ops.shape(output)[:-1])
      else:
        return res
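A hedged sketch of the same flatten/reshape pattern with public TF 1.x ops for timestep outputs (shapes hypothetical): logits flatten to [batch * time, classes], the per-position loss is computed, then reshaped back to [batch, time].

import tensorflow as tf

logits = tf.random_normal([4, 10, 50])  # [batch, time, classes]
targets = tf.zeros([4, 10], dtype=tf.int64)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.reshape(targets, [-1]),
    logits=tf.reshape(logits, [-1, 50]))
ce = tf.reshape(ce, tf.shape(logits)[:-1])  # back to [batch, time]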
Project: LIE    Author: EmbraceLife
def convert_dense_weights_data_format(dense,
                                              previous_feature_map_shape,
                                              target_data_format='channels_first'):
          """Utility useful when changing a convnet's `data_format`.

          When porting the weights of a convnet from one data format to the other,
          if the convnet includes a `Flatten` layer
          (applied to the last convolutional feature map)
          followed by a `Dense` layer, the weights of that `Dense` layer
          should be updated to reflect the new dimension ordering.

          Arguments:
              dense: The target `Dense` layer.
              previous_feature_map_shape: A shape tuple of 3 integers,
                  e.g. `(512, 7, 7)`. The shape of the convolutional
                  feature map right before the `Flatten` layer that
                  came before the target `Dense` layer.
              target_data_format: One of "channels_last", "channels_first".
                  Set it "channels_last"
                  if converting a "channels_first" model to "channels_last",
                  or reciprocally.
          """
          assert target_data_format in {'channels_last', 'channels_first'}
          kernel, bias = dense.get_weights()
          for i in range(kernel.shape[1]):
            if target_data_format == 'channels_first':
              c, h, w = previous_feature_map_shape
              original_fm_shape = (h, w, c)
              ki = kernel[:, i].reshape(original_fm_shape)
              ki = np.transpose(ki, (2, 0, 1))  # last -> first
            else:
              h, w, c = previous_feature_map_shape
              original_fm_shape = (c, h, w)
              ki = kernel[:, i].reshape(original_fm_shape)
              ki = np.transpose(ki, (1, 2, 0))  # first -> last
            kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
          dense.set_weights([kernel, bias])
Project: LIE    Author: EmbraceLife
def img_to_array(img, data_format=None):
          """Converts a PIL Image instance to a Numpy array.

          Arguments:
              img: PIL Image instance.
              data_format: Image data format.

          Returns:
              A 3D Numpy array.

          Raises:
              ValueError: if invalid `img` or `data_format` is passed.
          """
          if data_format is None:
            data_format = K.image_data_format()
          if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('Unknown data_format: ', data_format)
          # Numpy array x has format (height, width, channel)
          # or (channel, height, width)
          # but original PIL image has format (width, height, channel)
          x = np.asarray(img, dtype=K.floatx())
          if len(x.shape) == 3:
            if data_format == 'channels_first':
              x = x.transpose(2, 0, 1)
          elif len(x.shape) == 2:
            if data_format == 'channels_first':
              x = x.reshape((1, x.shape[0], x.shape[1]))
            else:
              x = x.reshape((x.shape[0], x.shape[1], 1))
          else:
            raise ValueError('Unsupported image shape: ', x.shape)
          return x

        # load image file into array with a given target_size rather than original size of image
Project: LIE    Author: EmbraceLife
def call(self, inputs):
            # In case the target shape is not fully defined,
            # we need access to the shape of x.
            target_shape = self.target_shape
            if -1 in target_shape:
              # target shape not fully defined
              target_shape = self._compute_output_shape(inputs.get_shape())
              target_shape = target_shape.as_list()[1:]
            return K.reshape(inputs, (-1,) + tuple(target_shape))
Project: LIE    Author: EmbraceLife
def build(self, input_shape):
            # Used purely for shape validation.
            if not isinstance(input_shape, list):
              raise ValueError('A merge layer should be called ' 'on a list of inputs.')
            if len(input_shape) < 2:
              raise ValueError('A merge layer should be called '
                               'on a list of at least 2 inputs. '
                               'Got ' + str(len(input_shape)) + ' inputs.')
            input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
            batch_sizes = [s[0] for s in input_shape if s is not None]
            batch_sizes = set(batch_sizes)
            batch_sizes -= set([None])
            if len(batch_sizes) > 1:
              raise ValueError('Can not merge tensors with different '
                               'batch sizes. Got tensors with shapes : ' +
                               str(input_shape))
            if input_shape[0] is None:
              output_shape = None
            else:
              output_shape = input_shape[0][1:]
            for i in range(1, len(input_shape)):
              if input_shape[i] is None:
                shape = None
              else:
                shape = input_shape[i][1:]
              output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
            # If the inputs have different ranks, we have to reshape them
            # to make them broadcastable.
            if None not in input_shape and len(set(map(len, input_shape))) == 1:
              self._reshape_required = False
            else:
              self._reshape_required = True
            self.built = True