Python tensorflow module: float64() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.float64().
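
As a quick orientation before the examples, here is a minimal sketch (written against the TensorFlow 1.x API that these snippets target) of the two usual ways a float64 tensor arises:

import tensorflow as tf

x = tf.constant([1.0, 2.0], dtype=tf.float64)  # create a float64 tensor directly
y = tf.cast(tf.ones([2]), tf.float64)          # or cast an existing tensor to float64
z = x + y                                      # operands must share the same dtype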

Project: attend_infer_repeat | Author: akosiorek
def _anneal_weight(init_val, final_val, anneal_type, global_step, anneal_steps, hold_for=0., steps_div=1.,
                       dtype=tf.float64):

        val, final, step, hold_for, anneal_steps, steps_div = (tf.cast(i, dtype) for i in
                                                               (init_val, final_val, global_step, hold_for, anneal_steps, steps_div))
        step = tf.maximum(step - hold_for, 0.)

        if anneal_type == 'exp':
            decay_rate = tf.pow(final / val, steps_div / anneal_steps)
            val = tf.train.exponential_decay(val, step, steps_div, decay_rate)

        elif anneal_type == 'linear':
            val = final + (val - final) * (1. - step / anneal_steps)
        else:
            raise NotImplementedError

        anneal_weight = tf.maximum(final, val)
        return anneal_weight
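
A hypothetical call of the helper above (the argument values are illustrative, not from the project; assumes a recent TF 1.x for tf.train.get_or_create_global_step):

# anneal a weight linearly from 1.0 down to 0.1 over 10000 steps,
# after holding it at the initial value for the first 1000 steps
global_step = tf.train.get_or_create_global_step()
kl_weight = _anneal_weight(init_val=1.0, final_val=0.1, anneal_type='linear',
                           global_step=global_step, anneal_steps=10000., hold_for=1000.)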
Project: attend_infer_repeat | Author: akosiorek
def tabular_kl(p, q, zero_prob_value=0., logarg_clip=None):
    """Computes KL-divergence KL(p||q) for two probability mass functions (pmf) given in a tabular form.

    :param p: iterable
    :param q: iterable
    :param zero_prob_value: float; values of `p` below this threshold are treated as zero
    :param logarg_clip: float or None; if not None, clips the argument of the log to lie in [1 / logarg_clip, logarg_clip]
    :return: tensor of the broadcast shape of (p * q), holding the per-coordinate contribution to KL(p||q)
    """
    p, q = (tf.cast(i, tf.float64) for i in (p, q))
    non_zero = tf.greater(p, zero_prob_value)
    logarg = p / q

    if logarg_clip is not None:
        logarg = clip_preserve(logarg, 1. / logarg_clip, logarg_clip)

    log = masked_apply(logarg, tf.log, non_zero)
    kl = p * log

    return tf.cast(kl, tf.float32)
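
The returned tensor holds the per-coordinate terms p * log(p / q), so summing it gives the scalar divergence. A hypothetical usage (clip_preserve and masked_apply are project helpers not shown in this snippet):

p = tf.constant([0.5, 0.5, 0.0])
q = tf.constant([0.25, 0.7, 0.05])
kl_terms = tabular_kl(p, q)   # per-coordinate contributions; zero where p == 0
kl = tf.reduce_sum(kl_terms)  # scalar KL(p||q)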
Project: deeppavlov | Author: deepmipt
def softmax_loss(self, antecedent_scores, antecedent_labels):
        """
        Computes the value of the loss function using antecedent_scores and antecedent_labels.
        In practice this is a standard softmax cross-entropy, marginalized over the gold antecedents.
        Args:
            antecedent_scores: tf.float64, [num_mentions, max_ant + 1], output of the fully-connected
                network that computes antecedent scores.
            antecedent_labels: True labels for the antecedents.

        Returns: [num_mentions]
            The value of the loss function.
        """
        gold_scores = antecedent_scores + tf.log(tf.cast(antecedent_labels, tf.float64))  # [num_mentions, max_ant + 1]
        marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1])  # [num_mentions]
        log_norm = tf.reduce_logsumexp(antecedent_scores, [1])  # [num_mentions]
        return log_norm - marginalized_gold_scores  # [num_mentions]
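
Since tf.log of the 0/1 label mask is 0 for gold antecedents and -inf otherwise, adding it removes the non-gold scores from the logsumexp. Per mention i, the returned value is therefore the negative marginal log-likelihood:

    loss_i = log(sum_j exp(s_ij)) - log(sum_{j in gold} exp(s_ij))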
Project: jack | Author: uclmr
def create_torch_variable(self, value, gpu=False):
        """Convenience method that produces a tensor given the value of the defined type.

        Returns: a torch tensor of same type.
        """
        if isinstance(value, torch.autograd.Variable):
            if gpu:
                value = value.cuda()
            return value
        if not torch.is_tensor(value):
            if not isinstance(value, np.ndarray):
                value = np.array(value, dtype=self.dtype.as_numpy_dtype)
            else:
                value = value.astype(self.dtype.as_numpy_dtype)
            if value.size == 0:
                return value
            allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
            if self.dtype in allowed:
                value = torch.autograd.Variable(torch.from_numpy(value))
        else:
            value = torch.autograd.Variable(value)
        if gpu and isinstance(value, torch.autograd.Variable):
            value = value.cuda()
        return value
Project: keras | Author: GeekLiB
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
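
The same mapping reads more compactly as a dictionary lookup; a behavior-equivalent sketch (with the uint8 entry corrected, as above):

_DTYPE_MAP = {
    'float16': tf.float16, 'float32': tf.float32, 'float64': tf.float64,
    'int16': tf.int16, 'int32': tf.int32, 'int64': tf.int64,
    'uint8': tf.uint8, 'uint16': tf.uint16,
}

def _convert_string_dtype(dtype):
    try:
        return _DTYPE_MAP[dtype]
    except KeyError:
        raise ValueError('Unsupported dtype:', dtype)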
Project: keras | Author: GeekLiB
def conv1d(x, kernel, stride=1, border_mode='valid',
           image_shape=None, filter_shape=None):
    '''1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        stride: stride integer.
        border_mode: string, "same" or "valid".
    '''
    # pre-process dtype
    if _FLOATX == 'float64':
        x = tf.cast(x, 'float32')
        kernel = tf.cast(kernel, 'float32')
    padding = _preprocess_border_mode(border_mode)
    x = tf.nn.conv1d(x, kernel, stride, padding=padding)
    # post-process dtype
    if _FLOATX == 'float64':
        x = tf.cast(x, 'float64')
    return x
Project: baselines | Author: openai
def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
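
The three float64 accumulators implement the usual running-moment identities, mean = sum(x) / n and std = sqrt(E[x^2] - mean^2), with epsilon seeding the counts and a 1e-2 floor under the variance. A NumPy sketch of the same computation (an illustration, not part of the baselines code):

import numpy as np

sum_, sumsq, count = 0.0, 1e-2, 1e-2  # same initialization as above (epsilon = 1e-2)
for batch in (np.array([1.0, 2.0]), np.array([3.0, 4.0])):
    sum_ += batch.sum()               # what incfiltparams adds to _sum
    sumsq += np.square(batch).sum()   # ... and to _sumsq
    count += batch.size               # ... and to _count
mean = sum_ / count
std = np.sqrt(max(sumsq / count - mean ** 2, 1e-2))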
Project: zhusuan | Author: thu-ml
def assert_same_float_dtype(tensors_with_name, dtype=None):
    """
    Check whether all tensors in `tensors_with_name` share the same dtype and
    whether that dtype is a floating-point type.

    :param tensors_with_name: A list of (tensor, tensor_name) pairs.
    :param dtype: Expected type. If `None`, it is inferred from the tensors.
    :return: The type of `tensors`.
    """

    floating_types = [tf.float16, tf.float32, tf.float64]
    if dtype is None:
        return assert_same_specific_dtype(tensors_with_name, floating_types)
    elif dtype in floating_types:
        return assert_same_dtype(tensors_with_name, dtype)
    else:
        raise TypeError("The argument 'dtype' must be in %s" % floating_types)
Project: IDNNs | Author: ravidziv
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: sonnet | Author: deepmind
def testInputTypeError(self, use_bias):
    """Errors are thrown for invalid input types."""
    conv1 = snt.Conv2D(output_channels=1,
                       kernel_shape=3,
                       stride=1,
                       padding=snt.SAME,
                       name="conv1",
                       use_bias=use_bias,
                       initializers=create_constant_initializers(
                           1.0, 1.0, use_bias))

    for dtype in (tf.float16, tf.float64):
      x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)
      err = "Input must have dtype tf.float32.*"
      with self.assertRaisesRegexp(TypeError, err):
        conv1(x)
Project: sonnet | Author: deepmind
def testInputTypeError(self, use_bias):
    """Errors are thrown for invalid input types."""
    conv1 = snt.Conv1D(output_channels=1,
                       kernel_shape=3,
                       stride=1,
                       padding=snt.VALID,
                       use_bias=use_bias,
                       name="conv1",
                       initializers=create_constant_initializers(
                           1.0, 1.0, use_bias))

    for dtype in (tf.float16, tf.float64):
      x = tf.constant(np.ones([1, 5, 1]), dtype=dtype)
      err = "Input must have dtype tf.float32.*"
      with self.assertRaisesRegexp(TypeError, err):
        conv1(x)
Project: sonnet | Author: deepmind
def testInputTypeError(self, batch_size, in_length, in_channels, out_channels,
                         kernel_shape, padding, use_bias, out_shape,
                         stride_shape):
    """Errors are thrown for invalid input types."""
    conv1 = snt.Conv1DTranspose(
        output_channels=out_channels,
        output_shape=out_shape,
        kernel_shape=kernel_shape,
        padding=padding,
        stride=stride_shape,
        name="conv1",
        use_bias=use_bias)

    for dtype in (tf.float16, tf.float64):
      x = tf.constant(np.ones([batch_size, in_length,
                               in_channels]), dtype=dtype)
      err = "Input must have dtype tf.float32.*"
      with self.assertRaisesRegexp(TypeError, err):
        conv1(x)
Project: sonnet | Author: deepmind
def testInputTypeError(self):
    """Errors are thrown for invalid input types."""
    conv1 = snt.Conv3D(output_channels=1,
                       kernel_shape=3,
                       stride=1,
                       padding=snt.SAME,
                       name="conv1",
                       initializers={
                           "w": tf.constant_initializer(1.0),
                           "b": tf.constant_initializer(1.0),
                       })

    for dtype in (tf.float16, tf.float64):
      x = tf.constant(np.ones([1, 5, 5, 5, 1]), dtype=dtype)
      self.assertRaisesRegexp(TypeError, "Input must have dtype tf.float32.*",
                              conv1, x)
Project: sonnet | Author: deepmind
def testVariableInitialization(self):
    # Check that a simple operation involving the TrainableVariable
    # matches the result of the corresponding operation in numpy
    np.random.seed(100)
    types = (tf.float16, tf.float32, tf.float64)
    tol = (1e-2, 1e-6, 1e-9)
    tolerance_map = dict(zip(types, tol))
    lhs_shape = [3, 4]
    rhs_shape = [4, 6]
    for dtype in types:
      x = tf.placeholder(dtype, shape=lhs_shape)
      var = snt.TrainableVariable(shape=rhs_shape,
                                  dtype=dtype,
                                  initializers={"w": _test_initializer()})
      y = tf.matmul(x, var())
      with self.test_session() as sess:
        lhs_matrix = np.random.randn(*lhs_shape)
        sess.run(tf.global_variables_initializer())
        product, w = sess.run([y, var.w], {x: lhs_matrix})
      self.assertAllClose(product,
                          np.dot(
                              lhs_matrix.astype(dtype.as_numpy_dtype),
                              w.astype(dtype.as_numpy_dtype)),
                          atol=tolerance_map[dtype],
                          rtol=tolerance_map[dtype])
Project: tensorflow-forward-ad | Author: renmengye
def test_hessian_quadratic(self):
    rnd = np.random.RandomState(0)
    dtype = tf.float64
    with tf.Graph().as_default():
      r = tf.Variable(0.0, dtype=dtype)
      x = tf.constant(rnd.uniform(-1.0, 1.0, [2, 27]), dtype=dtype, name="x")
      w2 = tf.constant(rnd.uniform(-1.0, 1.0, [27, 1]), dtype=dtype, name="w2")
      v2 = tf.constant(rnd.uniform(-1.0, 1.0, [27, 1]), dtype=dtype, name="v2")
      w2v = tf.add(w2, tf.multiply(r, v2))
      h2 = tf.matmul(x, w2v)
      y2 = tf.reduce_sum(h2 * h2)

      grad_w = tf.gradients(y2, w2)
      hv_fw = hessian_vec_fw(y2, [w2v], [v2])
      hv_bk = hessian_vec_bk(y2, [w2], [v2])

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        grad_w = sess.run(grad_w)
        hv_fw_val = sess.run(hv_fw)
        hv_bk_val = sess.run(hv_bk)
        np.testing.assert_allclose(hv_fw_val, hv_bk_val, rtol=1e-5)
Project: antgo | Author: jianzfb
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Ensures precision is increasing in reverse order.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
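
A worked example: for precision [1.0, 0.5] at recall [0.5, 1.0], padding gives precision [0., 1.0, 0.5, 0.] and recall [0., 0.5, 1.0, 1.0]; the reverse cummax turns precision into the monotone envelope [1.0, 1.0, 0.5, 0.0], and the Riemann sum evaluates to 1.0 * 0.5 + 0.5 * 0.5 + 0.0 * 0.0 = 0.75.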
Project: antgo | Author: jianzfb
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulated sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # 11-point interpolation: sample the precision at recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
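
On the same two-point curve as above (precision [1.0, 0.5] at recall [0.5, 1.0]), the six thresholds t <= 0.5 each contribute precision 1.0 and the five thresholds t > 0.5 contribute 0.5, so ap = (6 * 1.0 + 5 * 0.5) / 11 ≈ 0.77, slightly different from the VOC12-style integral, as expected for the coarser 11-point rule.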
Project: antgo | Author: jianzfb
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
Project: deep-learning-keras-projects | Author: jasmeetsb
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: deep-learning-keras-projects | Author: jasmeetsb
def conv1d(x, kernel, stride=1, border_mode='valid',
           image_shape=None, filter_shape=None):
    """1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        stride: stride integer.
        border_mode: string, `"same"` or `"valid"`.

    # Returns
        A tensor, result of 1D convolution.
    """
    # pre-process dtype
    x_dtype = dtype(x)
    if x_dtype == 'float64':
        x = tf.cast(x, 'float32')
        kernel = tf.cast(kernel, 'float32')
    padding = _preprocess_border_mode(border_mode)
    x = tf.nn.conv1d(x, kernel, stride, padding=padding)
    # post-process dtype
    if x_dtype == 'float64':
        x = tf.cast(x, 'float64')
    return x
Project: SSD_tensorflow_VOC | Author: LevinJ
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Ensures precision is increasing in reverse order.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
Project: SSD_tensorflow_VOC | Author: LevinJ
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulated sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # 11-point interpolation: sample the precision at recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
Project: SSD_tensorflow_VOC | Author: LevinJ
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
Project: lsdc | Author: febert
def testTensorSignatureExampleParserDict(self):
    examples = tf.placeholder(name='example', shape=[None], dtype=tf.string)
    placeholder_a = tf.placeholder(name='test',
                                   shape=[None, 100],
                                   dtype=tf.int32)
    placeholder_b = tf.placeholder(name='bb',
                                   shape=[None, 100],
                                   dtype=tf.float64)
    inputs = {'a': placeholder_a, 'b': placeholder_b}
    signatures = tensor_signature.create_signatures(inputs)
    result = tensor_signature.create_example_parser_from_signatures(
        signatures, examples)
    self.assertTrue(tensor_signature.tensors_compatible(result, signatures))
    new_signatures = tensor_signature.create_signatures(result)
    self.assertTrue(new_signatures['a'].is_compatible_with(signatures['a']))
    self.assertTrue(new_signatures['b'].is_compatible_with(signatures['b']))
Project: lsdc | Author: febert
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
      self):
    with self.test_session():
      embedding_weights = self._random_weights(num_shards=3)
      sparse_ids, sparse_weights = self._ids_and_weights_3d()

      embedding_weights[1] = embedding_weights[1].astype(np.float64)
      self.assertRaises(ValueError,
                        tf.contrib.layers.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids)
      embedding_weights = [
          tf.constant(w, dtype=tf.float64) for w in embedding_weights
      ]
      self.assertRaises(ValueError,
                        tf.contrib.layers.safe_embedding_lookup_sparse,
                        embedding_weights, sparse_ids, sparse_weights)
Project: nengo_dl | Author: nengo
def test_soft_lif(Simulator, sigma, seed):
    with nengo.Network(seed=seed) as net:
        inp = nengo.Node([0])
        ens = nengo.Ensemble(10, 1, neuron_type=SoftLIFRate(sigma=sigma))
        nengo.Connection(inp, ens)
        p = nengo.Probe(ens.neurons)

    x = str(ens.neuron_type)
    if sigma == 1:
        assert "sigma" not in x
    else:
        assert "sigma=%s" % sigma in x

    with nengo.Simulator(net) as sim:
        _, nengo_curves = nengo.utils.ensemble.tuning_curves(ens, sim)
        sim.run_steps(30)

    with Simulator(net, dtype=tf.float64) as sim2:
        _, nengo_dl_curves = nengo.utils.ensemble.tuning_curves(ens, sim2)
        sim2.run_steps(30)

    assert np.allclose(nengo_curves, nengo_dl_curves)
    assert np.allclose(sim.data[p], sim2.data[p])
Project: nengo_dl | Author: nengo
def __init__(self, *args, **kwargs):
        logging.basicConfig(level=logging.WARNING)

        if "NENGO_DL_TEST_PRECISION" in os.environ:
            if os.environ["NENGO_DL_TEST_PRECISION"] == "32":
                kwargs.setdefault("dtype", tf.float32)
            else:
                kwargs.setdefault("dtype", tf.float64)

        if "NENGO_DL_TEST_UNROLL" in os.environ:
            kwargs.setdefault("unroll_simulation",
                              int(os.environ["NENGO_DL_TEST_UNROLL"]))

        if "NENGO_DL_TEST_DEVICE" in os.environ:
            device = os.environ["NENGO_DL_TEST_DEVICE"]
            if device == "None":
                kwargs.setdefault("device", None)
            else:
                kwargs.setdefault("device", os.environ["NENGO_DL_TEST_DEVICE"])

        super(Simulator, self).__init__(*args, **kwargs)
Project: CElegansBehaviour | Author: ChristophKirst
def create_training(self, image_size=[151, 151]):
    """Create the cost function and trainer."""
    self.phi_input = tf.stop_gradient(tf.placeholder("float32", [None, image_size[0], image_size[1], 1]))

    def cost(output, phi_in):
        # return np.array([self.cost(o, phi_in) for o in output])
        return np.sum(self.cost_func(output, phi_in))

    def cost_grad(op, grad):
        # print(op)
        output = op.inputs[0]
        phi = op.inputs[1]
        grad = tf.py_func(self.cost_func_grad, [output, phi], [tf.float32])[0]
        # return [self.cost_func_grad(output, phi_in, epsilon=0.01), np.zeros(phi_in.shape)]
        return [grad, None]

    self.cost_tf = py_func(cost, [self.output, self.phi_input], [tf.float32], grad=cost_grad)[0]
    # self.cost_tf = tf.py_func(cost, [self.output, self.phi_input], [tf.float64])[0]
    # self.phi = tf.py_func(phi_func, [self.output], [tf.float64])
    # self.cost = tf.reduce_mean(tf.squared_difference(self.phi_input, self.phi))

    self.train_tf = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(self.cost_tf)
Project: keras-customized | Author: ambrite
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: keras-customized | Author: ambrite
def conv1d(x, kernel, stride=1, border_mode='valid',
           image_shape=None, filter_shape=None):
    '''1D convolution.

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        stride: stride integer.
        border_mode: string, "same" or "valid".
    '''
    # pre-process dtype
    x_dtype = dtype(x)
    if x_dtype == 'float64':
        x = tf.cast(x, 'float32')
        kernel = tf.cast(kernel, 'float32')
    padding = _preprocess_border_mode(border_mode)
    x = tf.nn.conv1d(x, kernel, stride, padding=padding)
    # post-process dtype
    if x_dtype == 'float64':
        x = tf.cast(x, 'float64')
    return x
Project: ParametricGP-in-Python | Author: maziarraissi
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))

        X_tf = tf.placeholder(tf.float64)
        y_tf = tf.placeholder(tf.float64)
        hyp_tf = tf.Variable(self.hyp, dtype=tf.float64)

        train = self.likelihood(hyp_tf, X_tf, y_tf)

        init = tf.global_variables_initializer()
        self.sess.run(init)

        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            X_batch, y_batch = fetch_minibatch(self.X,self.y,self.N_batch)
            self.sess.run(train, {X_tf:X_batch, y_tf:y_batch})

            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                nlml = self.sess.run(self.nlml)
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        self.hyp = self.sess.run(hyp_tf)
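
fetch_minibatch is defined elsewhere in the project; a plausible sketch of its contract (an assumption, shown only to make the training loop self-contained):

def fetch_minibatch(X, y, N_batch):
    # sample N_batch rows uniformly at random, without replacement
    idx = np.random.choice(X.shape[0], N_batch, replace=False)
    return X[idx], y[idx]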
Project: autodiff | Author: bgavran
def setUp(self):
        np.random.seed(1337)
        h_val = np.random.randn(2, 5)
        b0_val = np.random.randn(5)
        b1_val = np.random.randn(1, 5)
        b2_val = 7

        self.my_h = ad.Variable(h_val, name="h")
        self.my_b0 = ad.Variable(b0_val, name="b0")
        self.my_b1 = ad.Variable(b1_val, name="b1")
        self.my_b2 = ad.Variable(b2_val, name="b2")

        self.tf_h = tf.constant(h_val, dtype=tf.float64)
        self.tf_b0 = tf.constant(b0_val, dtype=tf.float64)
        self.tf_b1 = tf.constant(b1_val, dtype=tf.float64)
        self.tf_b2 = tf.constant(b2_val, dtype=tf.float64)
Project: neural_style_tensorflow | Author: burness
def preprocess(image, size, max_length):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(width, height) if max_length else tf.less(height, width)

    new_height, new_width = tf.cond(
        cond_op, lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))
    new_size = [tf.to_int32(new_height), tf.to_int32(new_width)]
    resized_image = tf.image.resize_images(image, new_size)
    normalised_image = resized_image - mean_pixel
    return normalised_image


# max_length: Whether `size` dictates the longest or the shortest side. Default: longest.
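
For instance, resizing a 400x300 (height x width) image with size=256 and max_length=True: width < height holds, so the new shape is (256, 300 * 256 / 400) = (256, 192) and the longest side ends up at 256; with max_length=False the condition flips and the shortest side is scaled to 256 instead.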
Project: image_captioning | Author: AgrawalAmey
def forward(self, x, y, mask):

        self.N = x.get_shape()[0].value
        self.T = x.get_shape()[1].value 
        self.V = x.get_shape()[2].value

        x_flat = tf.reshape(x, [self.N * self.T, self.V])
        y_flat = tf.reshape(y, [self.N * self.T])
        mask_flat = tf.cast(tf.reshape(mask, [self.N * self.T]), tf.float64)

        probs = tf.exp(x_flat - tf.reduce_max(x_flat, reduction_indices=[1], keep_dims=True))
        probs /= tf.reduce_sum(probs, reduction_indices=[1], keep_dims=True)
        coords = tf.transpose(tf.pack([tf.range(self.N * self.T), y_flat]))
        loss = -tf.reduce_sum(mask_flat * tf.log(tf.gather_nd(probs, coords))) / self.N

        self.y_flat, self.mask_flat, self.probs = y_flat, mask_flat, probs

        return loss
Project: imperative | Author: yaroslavvb
def _Inputs(self, x=None, y=None, q=3.0, dtype=tf.float64, sizes=None):
    x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x
    y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y
    assert len(x) == len(y)
    sizes = sizes if sizes else [len(x)]
    logits = tf.constant(x, shape=sizes, dtype=dtype, name="logits")
    targets = tf.constant(y, shape=sizes, dtype=dtype, name="targets")
    losses = np.array(self._WeightedCrossEntropy(x, y, q)).reshape(*sizes)
    return logits, targets, q, losses

  # def testConstructionNamed(self):
  #   with self.test_session():
  #     logits, targets, pos_weight, _ = self._Inputs()
  #     loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
  #                                                     pos_weight, name="mybce")
  #   self.assertEqual("mybce", loss.op.name)
Project: imperative | Author: yaroslavvb
def testSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._softmax(x_np)
    with self.test_session():
      x_tf = tf.constant(x_np)
      y_tf = tf.nn.softmax(x_tf)
      y_tf_np = y_tf.eval()
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)

  # def testGradient(self):
  #   x_shape = [5, 10]
  #   x_np = np.random.randn(*x_shape).astype(np.float64)
  #   with self.test_session():
  #     x_tf = tf.constant(x_np)
  #     y_tf = tf.nn.softmax(x_tf)
  #     err = tf.test.compute_gradient_error(x_tf, x_shape, y_tf, x_shape)
  #   eps = 1e-8
  #   self.assertLess(err, eps)


# use work-around from https://github.com/tensorflow/tensorflow/issues/2511
Project: imperative | Author: yaroslavvb
def testL2Loss(self):
    with self.test_session():
      x = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x")
      l2loss = tf.nn.l2_loss(x)
      value = l2loss.eval()
    self.assertAllClose(7.0, value)

  # def testGradient(self):
  #   x_shape = [20, 7, 3]
  #   np.random.seed(1)  # Make it reproducible.
  #   x_val = np.random.random_sample(x_shape).astype(np.float64)
  #   with self.test_session():
  #     x = tf.constant(x_val, name="x")
  #     output = tf.nn.l2_loss(x)
  #     err = tf.test.compute_gradient_error(x, x_shape, output, [1])
  #   print("L2Loss gradient err = %g " % err)
  #   err_tolerance = 1e-11
  #   self.assertLess(err, err_tolerance)
Project: imperative | Author: yaroslavvb
def testL2Normalize(self):
    x_shape = [20]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    for dim in range(len(x_shape)):
      y_np = self._l2Normalize(x_np, dim)
      with self.test_session():
        x_tf = tf.constant(x_np, name="x")
        y_tf = tf.nn.l2_normalize(x_tf, dim)
        self.assertAllClose(y_np, y_tf.eval())

  # def testL2NormalizeGradient(self):
  #   x_shape = [20, 7, 3]
  #   np.random.seed(1)
  #   x_np = np.random.random_sample(x_shape).astype(np.float64)
  #   for dim in range(len(x_shape)):
  #     with self.test_session():
  #       x_tf = tf.constant(x_np, name="x")
  #       y_tf = tf.nn.l2_normalize(x_tf, dim)
  #       err = tf.test.compute_gradient_error(x_tf, x_shape, y_tf, x_shape)
  #     print("L2Normalize gradient err = %g " % err)
  #     self.assertLess(err, 1e-4)
Project: imperative | Author: yaroslavvb
def testSum1CacheCpu(self):
    env = imperative.Env(tf)
    is_graph_changed(env)
    env.disable_gc()
    with env.g.device("cpu:0"):
      val1 = env.numpy_to_itensor([1, 2, 3])
      val2 = env.numpy_to_itensor([4, 5, 6])
      val3 = env.numpy_to_itensor([4, 5, 6], dtype=tf.float64)
      try:
        out1 = env.sum1(val1)
      except:
        import pdb;
        pdb.post_mortem()
      self.assertTrue(is_graph_changed(env))
      out2 = env.sum1(val2)
      self.assertFalse(is_graph_changed(env))
      out3 = env.sum1(val3)
      self.assertTrue(is_graph_changed(env))
Project: imperative | Author: yaroslavvb
def testSum1CacheGpu(self):
    if not tf.test.is_built_with_cuda():
      return True
    if not self.haveGpu0():
      return True

    env = imperative.Env(tf)
    with env.g.device("cpu:0"):
      val1 = env.numpy_to_itensor([1, 2, 3])
      val2 = env.numpy_to_itensor([4, 5, 6])
      val3 = env.numpy_to_itensor([4, 5, 6], dtype=tf.float64)
      try:
        out1 = env.sum1(val1)
      except:
        import pdb;
        pdb.post_mortem()
      self.assertTrue(is_graph_changed(env))
      out2 = env.sum1(val2)
      self.assertFalse(is_graph_changed(env))
      out3 = env.sum1(val3)
      self.assertTrue(is_graph_changed(env))
Project: Deep-Fashion | Author: TomPyonsuke
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Ensures precision is increasing in reverse order.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
Project: Deep-Fashion | Author: TomPyonsuke
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulated sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # 11-point interpolation: sample the precision at recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
Project: Deep-Fashion | Author: TomPyonsuke
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
Project: tensorflow-quorakaggle | Author: ram1988
def buildRNN(self,x,scope):
        print(x)
        x = tf.transpose(x, [1, 0, 2])        
        #print(x)
        x = tf.reshape(x, [-1,self.nfeatures])
        #print(x)
        x = tf.split(x, self.n_steps, 0)
        print(x)
        #lstm_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0) for _ in range(self.n_layers)], state_is_tuple=True)
        #outputs, states = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float64)
        with tf.name_scope("fw"+scope),tf.variable_scope("fw"+scope):
            fw_cell_array = []
            print(tf.get_variable_scope().name)
            for _ in range(self.n_layers):
                fw_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                #fw_cell = rnn.DropoutWrapper(fw_cell,output_keep_prob=self.dropout)                
                fw_cell_array.append(fw_cell)
            fw_cell = rnn.MultiRNNCell(fw_cell_array, state_is_tuple=True)
        with tf.name_scope("bw"+scope),tf.variable_scope("bw"+scope):
            bw_cell_array = []
            print(tf.get_variable_scope().name)
            for _ in range(self.n_layers):
                bw_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                #bw_cell = rnn.DropoutWrapper(bw_cell,output_keep_prob=self.dropout)
                bw_cell_array.append(bw_cell)
            bw_cell = rnn.MultiRNNCell(bw_cell_array, state_is_tuple=True)

        outputs, _,_ = tf.contrib.rnn.static_bidirectional_rnn(fw_cell, bw_cell, x, dtype=tf.float64)
        #outputs, = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, x, dtype=tf.float64)


        print(outputs)
        print(outputs[-1])

        return outputs[-1]
Project: tensorflow-quorakaggle | Author: ram1988
def buildSiameseNN(self, left_nn, right_nn):
        # construct the fully-connected output layer; could be extended with more layers
        print(self.nfeatures)
        weights = {
          'out': tf.Variable(tf.random_normal([2*self.nfeatures, self.n_classes],dtype=tf.float64),dtype = tf.float64)
        }
        biases = {
          'out': tf.Variable(tf.random_normal([self.n_classes],dtype=tf.float64),dtype = tf.float64)
        }

        joint_layer = tf.concat([left_nn,right_nn],1)
        print("joint layer-->"+str(joint_layer))
        batch_normalized = self.insertBatchNNLayer(joint_layer,[0],[2*self.nfeatures])        
        batch_normalized = tf.matmul(batch_normalized, weights['out']) + biases['out']
        result = tf.nn.softmax(batch_normalized)
        #add softmax layer
        return result
Project: prisma | Author: hijkzzz
def preprocess(image, size):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(height, width)

    # Scale the shorter side to `size`, preserving the aspect ratio.
    new_height, new_width = tf.cond(
        cond_op,
        lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))

    resized_image = tf.image.resize_images(
            image,
            [tf.to_int32(new_height), tf.to_int32(new_width)],
            method=tf.image.ResizeMethod.BICUBIC)
    cropped = tf.image.resize_image_with_crop_or_pad(resized_image, size, size)

    return cropped
Project: Doubly-Stochastic-DGP | Author: ICL-SML
def multisample_conditional(self, X, full_cov=False):
        if full_cov is True:
            # this is unlikely to be called in a performance critical application, so we use
            # this clear but slow implementation
            f = lambda a: self.conditional(a, full_cov=full_cov)
            mean, var = tf.map_fn(f, X, dtype=(tf.float64, tf.float64))
            return tf.stack(mean), tf.stack(var)
        else:
            # this should be faster as it only computes the Z_uu once, but could perhaps
            # be made faster still by avoiding reshaping (which would require rewriting conditional)
            S, N, D = shape_as_list(X)
            X_flat = tf.reshape(X, [S*N, D])
            mean, var = self.conditional(X_flat)
            return [tf.reshape(m, [S, N, -1]) for m in [mean, var]]
Project: segmentation_DLMI | Author: imatge-upc
def wasserstein_disagreement_map(prediction, ground_truth, M):
    """
    Function to calculate the pixel-wise Wasserstein distance between the
    flattened pred_proba and the flattened labels (ground_truth) with respect
    to the distance matrix on the label space M.

    :param prediction: the logits after softmax
    :param ground_truth: segmentation ground_truth
    :param M: distance matrix on the label space
    :return: the pixelwise distance map (wass_dis_map)
    """
    # pixel-wise Wasserstein distance (W) between flat_pred_proba and flat_labels
    # w.r.t. the distance matrix on the label space M
    n_classes = K.int_shape(prediction)[-1]
    # unstack_labels = tf.unstack(ground_truth, axis=-1)
    ground_truth = tf.cast(ground_truth, dtype=tf.float64)
    # unstack_pred = tf.unstack(prediction, axis=-1)
    prediction = tf.cast(prediction, dtype=tf.float64)
    # print("shape of M", M.shape, "unstacked labels", unstack_labels,
    #       "unstacked pred" ,unstack_pred)
    # W is a weighting sum of all pairwise correlations (pred_ci x labels_cj)
    pairwise_correlations = []
    for i in range(n_classes):
        for j in range(n_classes):
            pairwise_correlations.append(
                M[i, j] * tf.multiply(prediction[:,i], ground_truth[:,j]))
    wass_dis_map = tf.add_n(pairwise_correlations)
    return wass_dis_map
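
The map is the pixel-wise bilinear form sum_{i,j} M[i, j] * prediction[:, i] * ground_truth[:, j]. A hypothetical call for a two-class problem (values illustrative):

M = np.array([[0., 1.],
              [1., 0.]])                             # 0/1 ground distance between labels
pred = tf.constant([[0.9, 0.1], [0.4, 0.6]])         # softmax outputs, shape [n_pixels, n_classes]
gt = tf.constant([[1., 0.], [0., 1.]])               # one-hot ground truth
dis_map = wasserstein_disagreement_map(pred, gt, M)  # [0.1, 0.4]: probability mass on the wrong label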