Python tensorflow.python.ops.array_ops module: where() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.array_ops.where().
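
Before diving into the project extracts, here is a minimal sketch (not taken from any project below) of the two call forms of array_ops.where(): with three arguments it selects elementwise between x and y according to a boolean condition, and with a single boolean tensor it returns the coordinates of the True entries as an int64 tensor of shape [num_true, rank].

import tensorflow as tf
from tensorflow.python.ops import array_ops

cond = tf.constant([True, False, True])
x = tf.constant([1, 2, 3])
y = tf.constant([10, 20, 30])

selected = array_ops.where(cond, x, y)  # elementwise select -> [1, 20, 3]
indices = array_ops.where(cond)         # True coordinates  -> [[0], [2]]

Both forms appear throughout the examples below: the three-argument form as an elementwise select, and the one-argument form to recover indices, often followed by squeeze() or gather().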

Project: deep-learning    Author: lbkchen
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
Project: LIE    Author: EmbraceLife
def random_binomial(shape, p=0.0, dtype=None, seed=None):
      """Returns a tensor with random binomial distribution of values.

      Arguments:
          shape: A tuple of integers, the shape of tensor to create.
          p: A float, `0. <= p <= 1`, probability of binomial distribution.
          dtype: String, dtype of returned tensor.
          seed: Integer, random seed.

      Returns:
          A tensor.
      """
      if dtype is None:
        dtype = floatx()
      if seed is None:
        seed = np.random.randint(10e6)
      return array_ops.where(
          random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
          array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
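
Despite its name, each element above is an independent Bernoulli(p) draw (a binomial with n = 1): a uniform sample is thresholded against p, and where() picks 1 where the threshold is met and 0 elsewhere. A quick sanity check, assuming random_binomial is in scope:

samples = random_binomial((10000,), p=0.3)
# Every entry is exactly 0. or 1., and the sample mean is close to p,
# so reduce_mean(samples) evaluates to roughly 0.3.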
Project: LIE    Author: EmbraceLife
def lecun_uniform(seed=None):
      """LeCun uniform initializer.

      It draws samples from a uniform distribution within [-limit, limit]
      where `limit` is `sqrt(3 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          LeCun 98, Efficient Backprop,
          http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
      """
      return VarianceScaling(
          scale=1., mode='fan_in', distribution='uniform', seed=seed)
Project: LIE    Author: EmbraceLife
def glorot_normal(seed=None):
      """Glorot normal initializer, also called Xavier normal initializer.

      It draws samples from a truncated normal distribution centered on 0
      with `stddev = sqrt(2 / (fan_in + fan_out))`
      where `fan_in` is the number of input units in the weight tensor
      and `fan_out` is the number of output units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          Glorot & Bengio, AISTATS 2010
          http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
      """
      return VarianceScaling(
          scale=1., mode='fan_avg', distribution='normal', seed=seed)
Project: LIE    Author: EmbraceLife
def he_normal(seed=None):
      """He normal initializer.

      It draws samples from a truncated normal distribution centered on 0
      with `stddev = sqrt(2 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='normal', seed=seed)
Project: LIE    Author: EmbraceLife
def he_uniform(seed=None):
      """He uniform variance scaling initializer.

      It draws samples from a uniform distribution within [-limit, limit]
      where `limit` is `sqrt(6 / fan_in)`
      where `fan_in` is the number of input units in the weight tensor.

      Arguments:
          seed: A Python integer. Used to seed the random generator.

      Returns:
          An initializer.

      References:
          He et al., http://arxiv.org/abs/1502.01852
      """
      return VarianceScaling(
          scale=2., mode='fan_in', distribution='uniform', seed=seed)


    # Compatibility aliases

    # pylint: disable=invalid-name
Project: LIE    Author: EmbraceLife
def predict(self, x, **kwargs):
            """Returns the class predictions for the given test data.

            Arguments:
                x: array-like, shape `(n_samples, n_features)`
                    Test samples where n_samples is the number of samples
                    and n_features is the number of features.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments
                    of `Sequential.predict_classes`.

            Returns:
                preds: array-like, shape `(n_samples,)`
                    Class predictions.
            """
            kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
            classes = self.model.predict_classes(x, **kwargs)
            return self.classes_[classes]
Project: LIE    Author: EmbraceLife
def predict(self, x, **kwargs):
            """Returns predictions for the given test data.

            Arguments:
                x: array-like, shape `(n_samples, n_features)`
                    Test samples where n_samples is the number of samples
                    and n_features is the number of features.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments of `Sequential.predict`.

            Returns:
                preds: array-like, shape `(n_samples,)`
                    Predictions.
            """
            kwargs = self.filter_sk_params(Sequential.predict, kwargs)
            return np.squeeze(self.model.predict(x, **kwargs))
Project: LIE    Author: EmbraceLife
def score(self, x, y, **kwargs):
            """Returns the mean loss on the given test data and labels.

            Arguments:
                x: array-like, shape `(n_samples, n_features)`
                    Test samples where n_samples is the number of samples
                    and n_features is the number of features.
                y: array-like, shape `(n_samples,)`
                    True labels for X.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments of `Sequential.evaluate`.

            Returns:
                score: float
                    Mean loss of predictions on x wrt. y.
            """
            kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
            loss = self.model.evaluate(x, y, **kwargs)
            if isinstance(loss, list):
              return loss[0]
            return loss


# utils
Project: LIE    Author: EmbraceLife
def _batch_shuffle(index_array, batch_size):
          """Shuffles an array in a batch-wise fashion.

          Useful for shuffling HDF5 arrays
          (where one cannot access arbitrary indices).

          Arguments:
              index_array: array of indices to be shuffled.
              batch_size: integer.

          Returns:
              The `index_array` array, shuffled in a batch-wise fashion.
          """
          batch_count = int(len(index_array) / batch_size)
          # to reshape we need to be cleanly divisible by batch size
          # we stash extra items and reappend them after shuffling
          last_batch = index_array[batch_count * batch_size:]
          index_array = index_array[:batch_count * batch_size]
          index_array = index_array.reshape((batch_count, batch_size))
          np.random.shuffle(index_array)
          index_array = index_array.flatten()
          return np.append(index_array, last_batch)
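
A worked example, assuming _batch_shuffle is in scope: with 10 indices and batch_size=3, batch_count is 3, so indices 0..8 are reshaped to (3, 3) and the three rows are shuffled as whole units, while the leftover index 9 is stashed and re-appended at the end.

import numpy as np

shuffled = _batch_shuffle(np.arange(10), batch_size=3)
# Groups of 3 stay contiguous; only the group order changes.
# One possible outcome: [3 4 5 0 1 2 6 7 8 9]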
Project: LIE    Author: EmbraceLife
def get_word_index(path='imdb_word_index.json'):
          """Retrieves the dictionary mapping word indices back to words.

          Arguments:
              path: where to cache the data (relative to `~/.keras/dataset`).

          Returns:
              The word index dictionary.
          """
          path = get_file(
              path,
              origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json')
          f = open(path)
          data = json.load(f)
          f.close()
          return data
Project: LIE    Author: EmbraceLife
def load_data(path='mnist.npz'):
          """Loads the MNIST dataset.

          Arguments:
              path: path where to cache the dataset locally
                  (relative to ~/.keras/datasets).

          Returns:
              Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
          """
          path = get_file(
              path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
          f = np.load(path)
          x_train = f['x_train']
          y_train = f['y_train']
          x_test = f['x_test']
          y_test = f['y_test']
          f.close()
          return (x_train, y_train), (x_test, y_test)
Project: LIE    Author: EmbraceLife
def get_word_index(path='reuters_word_index.json'):
          """Retrieves the dictionary mapping word indices back to words.

          Arguments:
              path: where to cache the data (relative to `~/.keras/dataset`).

          Returns:
              The word index dictionary.
          """
          path = get_file(
              path,
              origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json')
          f = open(path)
          data = json.load(f)
          f.close()
          return data
Project: SSD_tensorflow_VOC    Author: LevinJ
def __match_with_labels(self, gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores,
                        jaccard, matching_threshold, gt_labels, gt_bboxes, num_anchors):
        # Debugging info:
        # jaccard = tf.Print(jaccard, [gt_labels], "gt_labels")
        # Match default boxes to any ground truth with jaccard overlap
        # higher than the matching threshold (typically 0.5).
        mask = tf.reduce_max(jaccard, axis=0) > matching_threshold
        mask_inds = tf.argmax(jaccard, axis=0)
        matched_labels = tf.gather(gt_labels, mask_inds)
        gt_anchor_labels = tf.where(mask, matched_labels, gt_anchor_labels)
        gt_anchor_bboxes = tf.where(mask, tf.gather(gt_bboxes, mask_inds), gt_anchor_bboxes)
        gt_anchor_scores = tf.reduce_max(jaccard, axis=0)

        # Match each ground truth box to the default box with the best
        # jaccard overlap.
        use_no_miss = True
        if use_no_miss:
            gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores = self.__match_no_miss(
                gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores,
                jaccard, gt_labels, gt_bboxes, num_anchors)

        return gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores
Project: lsdc    Author: febert
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
Project: lsdc    Author: febert
def _inverse_log_det_jacobian(self, y):
    # WLOG, consider the vector case:
    #   x = log(y[:-1]) - log(y[-1])
    # where,
    #   y[-1] = 1 - sum(y[:-1]).
    # We have:
    #   det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
    #                = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } }   (1)
    #                = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
    #                = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
    #                        det(diag(y[:-1])) }                     (2)
    #                = 1 / { y[-1] prod(y[:-1]) }
    #                = 1 / prod(y)
    # (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
    #       or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
    #       docstring "Tip".
    # (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
    return -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
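
The inner where() is what keeps gradients finite: the gradient of where() multiplies the upstream gradient by the derivative of each branch, so a division by zero in the unselected branch still produces 0 * inf = NaN. A small sketch of the failure mode and the fix (TF 1.x style):

import tensorflow as tf

num = tf.constant([1.0, 2.0])
den = tf.constant([0.0, 4.0])

naive = tf.where(den > 0, num / den, tf.zeros_like(num))
grad_naive = tf.gradients(naive, num)[0]  # NaN at index 0: 0 * (1 / 0)

safe_den = tf.where(tf.equal(den, 0), tf.ones_like(den), den)
safe = tf.where(den > 0, num / safe_den, tf.zeros_like(num))
grad_safe = tf.gradients(safe, num)[0]    # [0., 0.25], finite everywhere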
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _create_output_alternatives(self, predictions):
    """Creates output alternative for the Head.

    Args:
      predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
        symbolic name for an output Tensor possibly but not necessarily taken
        from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
        itself.

    Returns:
      `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
      'submodel_name' is a submodel identifier that should be consistent across
      the pipeline (here likely taken from the head_name),
      'problem_type' is a `ProblemType`,
      'tensor_name' is a symbolic name for an output Tensor possibly but not
       necessarily taken from `PredictionKey`, and
      'Tensor' is the corresponding output Tensor itself.
    """
    return {self._head_name: (self._problem_type, predictions)}


# TODO(zakaria): use contrib losses.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.truediv(numerator, denominator),
      0,
      name=name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _get_sharding_func(size, num_shards):
    """Create sharding function for scatter update."""

    def func(ids):
      if num_shards == 1:
        return None, ids
      else:
        ids_per_shard = size // num_shards
        extras = size % num_shards
        assignments = math_ops.maximum(ids // (ids_per_shard + 1),
                                       (ids - extras) // ids_per_shard)
        new_ids = array_ops.where(assignments < extras,
                                  ids % (ids_per_shard + 1),
                                  (ids - extras) % ids_per_shard)
        return assignments, new_ids

    return func
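
A worked example of the shard arithmetic in plain Python: with size=10 and num_shards=3, ids_per_shard is 3 and extras is 1, so shard 0 holds 4 ids while shards 1 and 2 hold 3 each, and new_ids renumbers each id from 0 within its shard.

size, num_shards = 10, 3
ids_per_shard = size // num_shards  # 3
extras = size % num_shards          # 1
for i in range(size):
    assignment = max(i // (ids_per_shard + 1), (i - extras) // ids_per_shard)
    if assignment < extras:
        new_id = i % (ids_per_shard + 1)
    else:
        new_id = (i - extras) % ids_per_shard
    print(i, assignment, new_id)
# ids 0-3 -> shard 0 (new_ids 0-3); ids 4-6 -> shard 1 (0-2); ids 7-9 -> shard 2 (0-2)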
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _sample_n(self, n, seed=None):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.distribution.sample(n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if lower_cutoff is not None:
        result_so_far = array_ops.where(result_so_far < lower_cutoff,
                                        lower_cutoff * ones, result_so_far)

      if upper_cutoff is not None:
        result_so_far = array_ops.where(result_so_far > upper_cutoff,
                                        upper_cutoff * ones, result_so_far)

      return result_so_far
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _log_prob_with_logsf_and_logcdf(self, y):
    """Compute log_prob(y) using log survival_function and cdf together."""
    # There are two options that would be equal if we had infinite precision:
    # Log[ sf(y - 1) - sf(y) ]
    #   = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
    # Log[ cdf(y) - cdf(y - 1) ]
    #   = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
    logsf_y = self.log_survival_function(y)
    logsf_y_minus_1 = self.log_survival_function(y - 1)
    logcdf_y = self.log_cdf(y)
    logcdf_y_minus_1 = self.log_cdf(y - 1)

    # Important:  Here we use select in a way such that no input is inf, this
    # prevents the troublesome case where the output of select can be finite,
    # but the output of grad(select) will be NaN.

    # In either case, we are doing Log[ exp{big} - exp{small} ]
    # We want to use the sf items precisely when we are on the right side of the
    # median, which occurs when logsf_y < logcdf_y.
    big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
    small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)

    return _logsum_expbig_minus_expsmall(big, small)
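
The helper _logsum_expbig_minus_expsmall is not shown in this extract. A standard numerically stable form, assuming big >= small elementwise, uses the identity log(exp(big) - exp(small)) = big + log1p(-exp(small - big)):

def _logsum_expbig_minus_expsmall(big, small):
  # Sketch of a stable log(exp(big) - exp(small)); factoring out exp(big)
  # avoids overflow and the cancellation of the direct subtraction.
  return big + math_ops.log1p(-math_ops.exp(small - big))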
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _mode(self):
    mode = (self.a - 1.) / (self.a_b_sum - 2.)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.logical_and(
              math_ops.greater(self.a, 1.),
              math_ops.greater(self.b, 1.)),
          mode,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.a,
              message="Mode not defined for components of a <= 1."),
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.b,
              message="Mode not defined for components of b <= 1."),
      ], mode)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.

    Doesn't actually do the sqrt! Named as such to agree with API.

    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, ie,
      C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma

    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), reduction_indices=[-1])
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _woodbury_sandwiched_term(self):
    """Computes the sandwiched term in the Woodbury identity.

    Computes the "`C`" in the the identity:
       inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
    where,
       C = inv(D) + V.T inv(M) V.

    See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity

    Returns:
      woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
    """
    minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
    vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
    return self._d_inv.add_to_tensor(vt_minv_v)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input.  Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. Its currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat(
        [array_ops.ones(
            [left], dtype=dtypes.int32), array_ops.shape(matrix)],
        0)
    return array_ops.reshape(matrix, pad)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _mode(self):
    mode = ((self.alpha - 1.) /
            (array_ops.expand_dims(self.alpha_sum, dim=-1) -
             math_ops.cast(self.event_shape()[0], self.dtype)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
      return array_ops.where(
          math_ops.greater(self.alpha, 1.),
          mode,
          array_ops.fill(shape, nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones((), dtype=self.dtype), self.alpha,
              message="mode not defined for components of alpha <= 1")
      ], mode)
Project: seq2seq    Author: google
def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      select_sample_noise = random_ops.random_uniform(
          [self.batch_size], seed=self._scheduling_seed)
      select_sample = (self._sampling_probability > select_sample_noise)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          array_ops.tile([-1], [self.batch_size]))
Project: seq2seq    Author: google
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
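
The two scatter_nd calls reassemble a full batch from the sampled and non-sampled subsets: each scatters its updates back to the original row positions, and because the two index sets are disjoint, the results can simply be added. A hypothetical mini-example of the pattern:

indices_a = tf.constant([[0], [2]])  # rows that were sampled
indices_b = tf.constant([[1], [3]])  # rows that were not
updates_a = tf.constant([10, 30])
updates_b = tf.constant([21, 41])
merged = (tf.scatter_nd(indices_a, updates_a, shape=[4]) +
          tf.scatter_nd(indices_b, updates_b, shape=[4]))
# -> [10, 21, 30, 41]: each update lands back in its original slot.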
Project: Question-Answering    Author: MurtyShikhar
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
  if memory_sequence_length is None:
    return score
  message = ("All values in memory_sequence_length must be greater than zero.")
  with ops.control_dependencies(
      [check_ops.assert_positive(memory_sequence_length, message=message)]):
    score_mask = array_ops.sequence_mask(
        memory_sequence_length, maxlen=array_ops.shape(score)[1])
    score_mask_values = score_mask_value * array_ops.ones_like(score)
    return array_ops.where(score_mask, score, score_mask_values)
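
For example, with memory_sequence_length = [2, 3] and a score of shape [2, 4], sequence_mask yields [[True, True, False, False], [True, True, True, False]], so where() keeps the first 2 and 3 scores per row and writes score_mask_value everywhere else (typically a very large negative number, so masked positions vanish under a later softmax). A minimal sketch of the same pattern:

lengths = tf.constant([2, 3])
score = tf.random_normal([2, 4])
mask = tf.sequence_mask(lengths, maxlen=tf.shape(score)[1])
masked = tf.where(mask, score, tf.float32.min * tf.ones_like(score))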
Project: conv_seq2seq    Author: tobyyouup
def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      select_sample_noise = random_ops.random_uniform(
          [self.batch_size], seed=self._scheduling_seed)
      select_sample = (self._sampling_probability > select_sample_noise)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          array_ops.tile([-1], [self.batch_size]))
Project: conv_seq2seq    Author: tobyyouup
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Project: deep-learning    Author: lbkchen
def get_stats(self, session):
    num_nodes = self.variables.end_of_tree.eval(session=session) - 1
    num_leaves = array_ops.where(
        math_ops.equal(array_ops.squeeze(array_ops.slice(
            self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
        ).eval(session=session).shape[0]
    return TreeStats(num_nodes, num_leaves)
Project: LIE    Author: EmbraceLife
def expand_dims(x, axis=-1):
      """Adds a 1-sized dimension at index "axis".

      Arguments:
          x: A tensor or variable.
          axis: Position where to add a new axis.

      Returns:
          A tensor with expanded dimensions.
      """
      return array_ops.expand_dims(x, axis)
Project: LIE    Author: EmbraceLife
def elu(x, alpha=1.):
      """Exponential linear unit.

      Arguments:
          x: A tensor or variable to compute the activation function for.
          alpha: A scalar, slope of positive section.

      Returns:
          A tensor.
      """
      res = nn.elu(x)
      if alpha == 1:
        return res
      else:
        return array_ops.where(x > 0, res, alpha * res)
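
Since nn.elu(x) already returns x for x > 0 and exp(x) - 1 for x <= 0, multiplying by alpha only rescales the negative part; the positive branch of the where() is unchanged, which is why res can be reused on both sides. For instance:

x = tf.constant([-1.0, 2.0])
res = tf.nn.elu(x)                     # [exp(-1) - 1, 2.0]
out = tf.where(x > 0, res, 2.0 * res)  # [2 * (exp(-1) - 1), 2.0]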
Project: LIE    Author: EmbraceLife
def fit(self, x, y, **kwargs):
            """Constructs a new model with `build_fn` & fit the model to `(x, y)`.

            Arguments:
                x : array-like, shape `(n_samples, n_features)`
                    Training samples where n_samples is the number of samples
                    and n_features is the number of features.
                y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
                    True labels for X.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments of `Sequential.fit`

            Returns:
                history : object
                    details about the training history at each epoch.
            """
            if self.build_fn is None:
              self.model = self.__call__(**self.filter_sk_params(self.__call__))
            elif (not isinstance(self.build_fn, types.FunctionType) and
                  not isinstance(self.build_fn, types.MethodType)):
              self.model = self.build_fn(
                  **self.filter_sk_params(self.build_fn.__call__))
            else:
              self.model = self.build_fn(**self.filter_sk_params(self.build_fn))

            loss_name = self.model.loss
            if hasattr(loss_name, '__name__'):
              loss_name = loss_name.__name__
            if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
              y = to_categorical(y)

            fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
            fit_args.update(kwargs)

            history = self.model.fit(x, y, **fit_args)

            return history
Project: LIE    Author: EmbraceLife
def fit(self, x, y, **kwargs):
            """Constructs a new model with `build_fn` & fit the model to `(x, y)`.

            Arguments:
                x : array-like, shape `(n_samples, n_features)`
                    Training samples where n_samples is the number of samples
                    and n_features is the number of features.
                y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
                    True labels for X.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments of `Sequential.fit`

            Returns:
                history : object
                    details about the training history at each epoch.

            Raises:
                ValueError: In case of invalid shape for `y` argument.
            """
            y = np.array(y)
            if len(y.shape) == 2 and y.shape[1] > 1:
              self.classes_ = np.arange(y.shape[1])
            elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
              self.classes_ = np.unique(y)
              y = np.searchsorted(self.classes_, y)
            else:
              raise ValueError('Invalid shape for y: ' + str(y.shape))
            self.n_classes_ = len(self.classes_)
            return super(KerasClassifier, self).fit(x, y, **kwargs)
Project: LIE    Author: EmbraceLife
def score(self, x, y, **kwargs):
            """Returns the mean accuracy on the given test data and labels.

            Arguments:
                x: array-like, shape `(n_samples, n_features)`
                    Test samples where n_samples is the number of samples
                    and n_features is the number of features.
                y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
                    True labels for x.
                **kwargs: dictionary arguments
                    Legal arguments are the arguments of `Sequential.evaluate`.

            Returns:
                score: float
                    Mean accuracy of predictions on X wrt. y.

            Raises:
                ValueError: If the underlying model isn't configured to
                    compute accuracy. You should pass `metrics=["accuracy"]` to
                    the `.compile()` method of the model.
            """
            y = np.searchsorted(self.classes_, y)
            kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)

            loss_name = self.model.loss
            if hasattr(loss_name, '__name__'):
              loss_name = loss_name.__name__
            if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
              y = to_categorical(y)

            outputs = self.model.evaluate(x, y, **kwargs)
            if not isinstance(outputs, list):
              outputs = [outputs]
            for name, output in zip(self.model.metrics_names, outputs):
              if name == 'acc':
                return output
            raise ValueError('The model is not configured to compute accuracy. '
                             'You should pass `metrics=["accuracy"]` to '
                             'the `model.compile()` method.')
Project: LIE    Author: EmbraceLife
def urlretrieve(url, filename, reporthook=None, data=None):
            """Replacement for `urlretrive` for Python 2.

            Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
            `urllib` module, known to have issues with proxy management.

            Arguments:
                url: url to retrieve.
                filename: where to store the retrieved data locally.
                reporthook: a hook function that will be called once
                    on establishment of the network connection and once
                    after each block read thereafter.
                    The hook will be passed three arguments;
                    a count of blocks transferred so far,
                    a block size in bytes, and the total size of the file.
                data: `data` argument passed to `urlopen`.
            """

            def chunk_read(response, chunk_size=8192, reporthook=None):
              content_type = response.info().get('Content-Length')
              total_size = -1
              if content_type is not None:
                total_size = int(content_type.strip())
              count = 0
              while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                  reporthook(count, total_size, total_size)
                  break
                if reporthook:
                  reporthook(count, chunk_size, total_size)
                yield chunk

            response = urlopen(url, data)
            with open(filename, 'wb') as fd:
              for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
Project: LIE    Author: EmbraceLife
def make_sampling_table(size, sampling_factor=1e-5):
          """Generates a word rank-based probabilistic sampling table.

          This generates an array where the ith element
          is the probability that a word of rank i would be sampled,
          according to the sampling distribution used in word2vec.

          The word2vec formula is:
              p(word) = min(1, sqrt(word.frequency/sampling_factor) /
              (word.frequency/sampling_factor))

          We assume that the word frequencies follow Zipf's law (s=1) to derive
          a numerical approximation of frequency(rank):
             frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
              where gamma is the Euler-Mascheroni constant.

          Arguments:
              size: int, number of possible words to sample.
              sampling_factor: the sampling factor in the word2vec formula.

          Returns:
              A 1D Numpy array of length `size` where the ith entry
              is the probability that a word of rank i should be sampled.
          """
          gamma = 0.577
          rank = np.array(list(range(size)))
          rank[0] = 1
          inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
          f = sampling_factor * inv_fq

          return np.minimum(1., f / np.sqrt(f))
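
A quick numeric check of the Zipf approximation at rank 1: log(1) = 0, so inv_fq reduces to gamma + 0.5 - 1/12, i.e. the top-ranked word is assigned a relative frequency of roughly 1, as expected for Zipf's law with s = 1.

import numpy as np

gamma = 0.577
rank = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
print(inv_fq)  # ~0.9937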
Project: LIE    Author: EmbraceLife
def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
            """This is where the layer's logic lives.

            Arguments:
                inputs: Input tensor, or list/tuple of input tensors.
                **kwargs: Additional keyword arguments.

            Returns:
                A tensor or list/tuple of tensors.
            """
            return inputs

          # 0. call tf.keras.Layer.__call__() on input tensor
          # 1. tf Layer __call__ is called
          # 2. tf.keras.Conv2D.build() is called
          # 3. link to tf.layers.Conv2D.build is called, inside, create kernel tensor and bias tensor
          # 4. back to tf.keras.Conv2D.build, constraints attributes are added
          # 5. tf.layers.convolutional.Conv2D.call is called:
                # create outputs from nn.convolution()
                # add bias tensor onto outputs
                # run activation on outputs
          # back to tf.layers.__call__():
          # 6. run _add_inbound_node:
                # fill in inbound_layers, node_indices, input_tensors, outputs_tensors based on info from input_tensors
                # create a node and store this node inside inbound_layer
                # add _keras_history to output_tensors
          # 7. other attributes added
Project: LIE    Author: EmbraceLife
def save(self, filepath, overwrite=True, include_optimizer=True):
            """Save the model to a single HDF5 file.

            The savefile includes:
                - The model architecture, allowing to re-instantiate the model.
                - The model weights.
                - The state of the optimizer, allowing to resume training
                    exactly where you left off.

            This allows you to save the entirety of the state of a model
            in a single file.

            Saved models can be reinstantiated via `keras.models.load_model`.
            The model returned by `load_model`
            is a compiled model ready to be used (unless the saved model
            was never compiled in the first place).

            Arguments:
                filepath: String, path to the file to save the weights to.
                overwrite: Whether to silently overwrite any existing file at the
                    target location, or provide the user with a manual prompt.
                include_optimizer: If True, save optimizer's state together.

            Example:

            ```python
            from keras.models import load_model

            model.save('my_model.h5')  # creates a HDF5 file 'my_model.h5'
            del model  # deletes the existing model

            # returns a compiled model
            # identical to the previous one
            model = load_model('my_model.h5')
"""
        from tensorflow.contrib.keras.python.keras.models import save_model  # pylint: disable=g-import-not-at-top
        save_model(self, filepath, overwrite, include_optimizer)

```

Project: LIE    Author: EmbraceLife
def load_weights(self, filepath, by_name=False):
            """Loads all layer weights from a HDF5 save file.

            If `by_name` is False (default) weights are loaded
            based on the network's topology, meaning the architecture
            should be the same as when the weights were saved.
            Note that layers that don't have weights are not taken
            into account in the topological ordering, so adding or
            removing layers is fine as long as they don't have weights.

            If `by_name` is True, weights are loaded into layers
            only if they share the same name. This is useful
            for fine-tuning or transfer-learning models where
            some of the layers have changed.

            Arguments:
                filepath: String, path to the weights file to load.
                by_name: Boolean, whether to load weights by name
                    or by topological order.

            Raises:
                ImportError: If h5py is not available.
            """
            if h5py is None:
              raise ImportError('`load_weights` requires h5py.')
            f = h5py.File(filepath, mode='r')
            if 'layer_names' not in f.attrs and 'model_weights' in f:
              f = f['model_weights']
            if by_name:
              load_weights_from_hdf5_group_by_name(f, self.layers)
            else:
              load_weights_from_hdf5_group(f, self.layers)

            if hasattr(f, 'close'):
              f.close()
Project: SSD_tensorflow_VOC    Author: LevinJ
def abs_smooth_2(x):
    """Smoothed absolute function. Useful to compute an L1 smooth error.

    Define as:
        x^2 / 2         if abs(x) < 1
        abs(x) - 0.5    if abs(x) > 1
    an implementation that strictly stick to the formula
    """
    absx = tf.abs(x)
    r = array_ops.where(absx < 1, math_ops.square(x)/2.0, absx-0.5)
    return r
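
The two pieces meet at abs(x) = 1, where both evaluate to 0.5, so the function is continuous; this is the smooth L1 (Huber-style) loss with a unit transition point. A quick check, assuming abs_smooth_2 is in scope:

x = tf.constant([-2.0, -0.5, 0.5, 2.0])
r = abs_smooth_2(x)  # -> [1.5, 0.125, 0.125, 1.5]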
Project: SSD_tensorflow_VOC    Author: LevinJ
def __match_no_miss(self, gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores,
                    jaccard, gt_labels, gt_bboxes, num_anchors):
        # Make sure every ground truth box is matched to at least one anchor box.
        max_inds = tf.cast(tf.argmax(jaccard, axis=1), tf.int32)

        def cond(i, gt_anchors_labels, gt_anchors_bboxes, gt_anchors_scores):
            return tf.less(i, tf.shape(gt_labels)[0])

        def body(i, gt_anchors_labels, gt_anchors_bboxes, gt_anchors_scores):
            # Update gt_anchors_labels.
            updates = tf.reshape(gt_labels[i], [-1])
            indices = tf.reshape(max_inds[i], [1, -1])
            shape = tf.reshape(num_anchors, [-1])

            new_labels = tf.scatter_nd(indices, updates, shape)
            new_mask = tf.cast(new_labels, tf.bool)
            gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

            # Update gt_anchors_bboxes.
            updates = tf.reshape(gt_bboxes[i], [1, -1])
            indices = tf.reshape(max_inds[i], [1, -1])
            shape = tf.shape(gt_anchors_bboxes)
            new_bboxes = tf.scatter_nd(indices, updates, shape)
            gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

            # Update gt_anchors_scores.
            updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
            indices = tf.reshape(max_inds[i], [1, -1])
            shape = tf.reshape(num_anchors, [-1])
            new_scores = tf.scatter_nd(indices, updates, shape)
            gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)

            return [i + 1, gt_anchors_labels, gt_anchors_bboxes, gt_anchors_scores]

        i = 0
        [i, gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores] = tf.while_loop(
            cond, body, [i, gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores])

        return gt_anchor_labels, gt_anchor_bboxes, gt_anchor_scores
Project: lsdc    Author: febert
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.
  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)
    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
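
Hypothetical usage, keeping rows 0 and 2 of a 3-row SparseTensor; sparse_retain drops the entries of the masked-out rows while preserving the dense shape.

st = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
                     values=[1.0, 2.0, 3.0],
                     dense_shape=[3, 2])
kept = sparse_boolean_mask(st, tf.constant([True, False, True]))
# kept.indices -> [[0, 0], [2, 0]]; kept.values -> [1.0, 3.0]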
Project: lsdc    Author: febert
def get_stats(self, session):
    num_nodes = self.variables.end_of_tree.eval(session=session) - 1
    num_leaves = array_ops.where(
        math_ops.equal(array_ops.squeeze(array_ops.slice(
            self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
        ).eval(session=session).shape[0]
    return TreeStats(num_nodes, num_leaves)
Project: lsdc    Author: febert
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See lt.tile.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))

    labeled_axes = [name for name in multiples
                    if labeled_tensor.axes[name].labels is not None]
    if labeled_axes:
      raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)

    multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)

    new_axes = [axis.name if axis.labels is None else axis
                for axis in labeled_tensor.axes.values()]
    return core.LabeledTensor(tile_op, new_axes)