Python tensorflow module: complex64() code examples

We extracted the following 43 code examples from open-source Python projects to illustrate how to use tensorflow.complex64().
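
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common tf.complex64 patterns that recur in them: casting a real tensor to complex, assembling a complex tensor from real and imaginary parts, and feeding the result to an FFT op. It is written against the TF 1.x API used throughout this page.

import tensorflow as tf

real = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)
imag = tf.constant([0.5, -0.5, 0.25, -0.25], dtype=tf.float32)

as_complex = tf.cast(real, tf.complex64)   # imaginary part becomes zero
from_parts = tf.complex(real, imag)        # float32 inputs give a complex64 result
spectrum = tf.fft(from_parts)              # FFT ops require a complex input

with tf.Session() as sess:
    print(sess.run([as_complex, from_parts, spectrum]))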

Project: Mendelssohn    Author: diggerdu
def stft(wav, n_fft=1024, overlap=4, dt=tf.int32, absp=False):
    assert (wav.shape[0] > n_fft)
    X = tf.placeholder(dtype=dt,shape=wav.shape)
    X = tf.cast(X,tf.float32)
    hop = n_fft // overlap  # integer hop so the range() step below is an int

    ## prepare constant variable
    Pi = tf.constant(np.pi, dtype=tf.float32)
    W = tf.constant(scipy.hanning(n_fft), dtype=tf.float32)
    S = tf.pack([tf.fft(tf.cast(tf.multiply(W,X[i:i+n_fft]),\
            tf.complex64)) for i in range(1, wav.shape[0] - n_fft, hop)])
    abs_S = tf.complex_abs(S)
    sess = tf.Session()
    if absp:
        return sess.run(abs_S, feed_dict={X:wav})
    else:
        return sess.run(S, feed_dict={X:wav})
Project: complex_tf    Author: woodshop
def testNesterovMomentum(self):
    for dtype in [tf.complex64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        cost = 5 * var0 * var0 + 3 * var1
        global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step')
        mom_op = ctf.train.CplxMomentumOptimizer(learning_rate=2.0, momentum=0.9,
            use_nesterov=True)
        opt_op = mom_op.minimize(cost, global_step, [var0, var1])
        tf.global_variables_initializer().run()
        for t in range(1, 5):
          opt_op.run()
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(var0_np,
              accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
              accum1_np, 3, 2.0, 0.9)
          self.assertAllClose(var0_np, var0.eval())
          self.assertAllClose(var1_np, var1.eval())
Project: complex_tf    Author: woodshop
def testTwoSessions(self):
    optimizer = ctf.train.CplxAdamOptimizer()
    g = tf.Graph()
    with g.as_default():
      with tf.Session():
        var0 = tf.Variable(np.array([1.0+1.0j, 2.0+2.0j], dtype=np.complex64),
                           name="v0")
        grads0 = tf.constant(np.array([0.1+0.1j, 0.1+0.1j], dtype=np.complex64))
        optimizer.apply_gradients([(grads0, var0)])

    gg = tf.Graph()
    with gg.as_default():
      with tf.Session():
        var0 = tf.Variable(np.array([1.0+1.0j, 2.0+2.0j], dtype=np.complex64),
                           name="v0")
        grads0 = tf.constant(np.array([0.1+0.1j, 0.1+0.1j], dtype=np.complex64))

        # If the optimizer saves any state not keyed by graph the following line
        # fails.
        optimizer.apply_gradients([(grads0, var0)])
Project: MobileNet    Author: Zehaos
def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)

    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu
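
For reference, the same root-filtering logic can be checked outside the TF graph. The following standalone NumPy sketch is only an illustration (const_fact is a hypothetical value, not taken from the project): it builds the same cubic, solves it with np.roots, and keeps the real root in (0, 1), just as the tf.where/tf.gather code above does.

import numpy as np

const_fact = 0.5  # hypothetical stand-in for dist_to_opt_avg**2 * h_min**2 / (2 * grad_var)
coef = np.array([-1.0, 3.0, -(3 + const_fact), 1.0])
roots = np.roots(coef)
valid = roots[(roots.real > 0.0) & (roots.real < 1.0) & (np.abs(roots.imag) < 1e-5)]
# get_mu_tensor squares this root before taking the max with ((sqrt(dr)-1)/(sqrt(dr)+1))**2
print(valid[0].real)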
Project: source_separation_ml_jeju    Author: hjkwon0609
def apply_mask(spec, mask):
    mag_spec = tf.abs(spec)
    phase_spec = get_phase(spec)
    return tf.multiply(tf.cast(tf.multiply(mag_spec, mask), tf.complex64), tf.exp(tf.complex(tf.zeros_like(mag_spec), phase_spec)))
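
The get_phase helper is defined elsewhere in that project and is not shown here. As a hedged sketch only, a helper with the behaviour this code assumes (returning the element-wise phase of the complex spectrogram as a float tensor) could be written with standard TF ops:

def get_phase(spec):
    # hypothetical helper, not project code: angle(z) = atan2(Im(z), Re(z))
    return tf.atan2(tf.imag(spec), tf.real(spec))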
Project: complex_tf    Author: woodshop
def testCplxL2Loss(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        x = tf.constant([1.0+1.0j, 0.0-2.0j, 3.0-0.0j, 2.0+1.0j], shape=[2, 2],
                        name="x", dtype=dtype)
        l2loss = ctf.nn.cplx_l2_loss(x)
        value = l2loss.eval()
      self.assertAllClose(10.0, value)
Project: complex_tf    Author: woodshop
def _toType(self, dtype):
    if dtype == np.complex64:
      return tf.complex64
    else:
      assert False, (dtype)
Project: complex_tf    Author: woodshop
def testApplyAdam(self):
    for dtype, force_gpu in itertools.product(
        [np.complex64], [True]):
      var = np.arange(100).astype(dtype)
      m = np.arange(1, 101).astype(dtype)
      v = np.arange(101, 201).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForAdam(var, m, v, grad, force_gpu)
Project: complex_tf    Author: woodshop
def testBasic(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        v0 = [1.0+2.0j, 2.0+1.0j]
        v1 = [3.0-4.0j, 4.0-3.0j]
        g0 = [0.1+0.1j, 0.1-0.1j]
        g1 = [0.01-0.01j, 0.01+0.01j]
        lr = 3.0-1.5j 
        var0 = tf.Variable(v0, dtype=dtype)
        var1 = tf.Variable(v1, dtype=dtype)
        grads0 = tf.constant(g0, dtype=dtype)
        grads1 = tf.constant(g1, dtype=dtype)
        sgd_op = ctf.train.CplxGradientDescentOptimizer(
          lr).apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType(v0, var0.eval())
        self.assertAllCloseAccordingToType(v1, var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [v0[0] - lr * g0[0],
             v0[1] - lr * g0[1]], var0.eval())
        self.assertAllCloseAccordingToType(
            [v1[0] - lr * g1[0],
             v1[1] - lr * g1[1]], var1.eval())
Project: complex_tf    Author: woodshop
def testTensorLearningRate(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        v0 = [1.0+2.0j, 2.0+1.0j]
        v1 = [3.0-4.0j, 4.0-3.0j]
        g0 = [0.1+0.1j, 0.1-0.1j]
        g1 = [0.01-0.01j, 0.01+0.01j]
        lr = 3.0-1.5j 
        var0 = tf.Variable(v0, dtype=dtype)
        var1 = tf.Variable(v1, dtype=dtype)
        grads0 = tf.constant(g0, dtype=dtype)
        grads1 = tf.constant(g1, dtype=dtype)
        lrate = tf.constant(lr)
        sgd_op = ctf.train.CplxGradientDescentOptimizer(
          lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType(v0, var0.eval())
        self.assertAllCloseAccordingToType(v1, var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [v0[0] - lr * g0[0],
             v0[1] - lr * g0[1]], var0.eval())
        self.assertAllCloseAccordingToType(
            [v1[0] - lr * g1[0],
             v1[1] - lr * g1[1]], var1.eval())
Project: complex_tf    Author: woodshop
def testGradWrtRef(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        values = [1.0+2.0j, 2.0+1.0j]
        lr = 3.0-1.5j 
        opt = ctf.train.CplxGradientDescentOptimizer(lr)
        values = [1.0, 3.0]
        vars_ = [tf.Variable([v], dtype=dtype) for v in values]
        grads_and_vars = opt.compute_gradients(
          vars_[0]._ref() + vars_[1], vars_)
        tf.global_variables_initializer().run()
        for grad, _ in grads_and_vars:
          self.assertAllCloseAccordingToType([1.0], grad.eval())
Project: complex_tf    Author: woodshop
def testTensorLearningRate(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0-1.0j, 2.0-2.0j], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1+0.1j, 0.1-0.1j], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0+3.0j, 4.0-4.0j], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01-0.01j, 0.01+0.01j],
                             dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer(tf.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0-1.0j, 2.0-2.0j], var0.eval())
        self.assertAllClose([3.0+3.0j, 4.0-4.0j], var1.eval())

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
Project: complex_tf    Author: woodshop
def testComplexReduce3D(self):
    # Create a 3D array of floats and reduce across all possible
    # dimensions
    np_arr = (np.linspace(10, -10, 30) +
              1j*np.linspace(-10, 10, 30)).reshape(
                [2, 3, 5]).astype(np.complex64)
    self._compareAll(np_arr, None)
    self._compareAll(np_arr, [])
    self._compareAll(np_arr, [0])
    self._compareAll(np_arr, [1])
    self._compareAll(np_arr, [2])
    self._compareAll(np_arr, [0, 1])
    self._compareAll(np_arr, [1, 2])
    self._compareAll(np_arr, [0, 2])
    self._compareAll(np_arr, [0, 1, 2])
Project: complex_tf    Author: woodshop
def testInfinity(self):
    for dtype in [np.complex64]:
      for special_value_x in [-np.inf, np.inf]:
        for special_value_y in [-np.inf, np.inf]:
          np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
          self._compareAll(np_arr, None)
Project: complex_tf    Author: woodshop
def _testTypes(self, vals):
    for dtype in [np.complex64]:
      self.setUp()
      x = vals.astype(dtype)
      tftype = _NP_TO_TF[dtype]
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether the
      # Variable op has an implementation for that type on GPU as we expect
      # that Variable and Assign have GPU implementations for matching tf.
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
Project: complex_tf    Author: woodshop
def testset_shape(self):
    p = state_ops.variable_op([1, 2], tf.complex64)
    self.assertEqual([1, 2], p.get_shape())
    p = state_ops.variable_op([1, 2], tf.complex64, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
Project: complex_tf    Author: woodshop
def testAssign(self):
    value = np.array([[42.0+42.0j, 43.0+43.0j]])
    var = state_ops.variable_op(value.shape, tf.complex64)
    self.assertShapeEqual(value, var)
    assigned = tf.assign(var, value)
    self.assertShapeEqual(value, assigned)
Project: complex_tf    Author: woodshop
def testAssignNoValidateShape(self):
    value = np.array([[42.0+42.0j, 43.0+43.0j]])
    var = state_ops.variable_op(value.shape, tf.complex64)
    self.assertShapeEqual(value, var)
    assigned = tf.assign(var, value, validate_shape=False)
    self.assertShapeEqual(value, assigned)
Project: complex_tf    Author: woodshop
def testAssignNoVarShape(self):
    value = np.array([[42.0+42.0j, 43.0+43.0j]])
    var = state_ops.variable_op(value.shape, tf.complex64, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    assigned = tf.assign(var, value)
    self.assertShapeEqual(value, assigned)
Project: complex_tf    Author: woodshop
def _NewShapelessTensor(self):
    tensor = tf.placeholder(tf.complex64)
    self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
    return tensor
Project: complex_tf    Author: woodshop
def testAssignNoValueShape(self):
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, tf.complex64)
    assigned = tf.assign(var, value)
    self.assertEqual(shape, var.get_shape())
    self.assertEqual(shape, assigned.get_shape())
Project: complex_tf    Author: woodshop
def testAssignNoValueShapeNoValidateShape(self):
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, tf.complex64)
    self.assertEqual(shape, var.get_shape())
    assigned = tf.assign(var, value, validate_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
Project: complex_tf    Author: woodshop
def testAssignNoShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.complex64, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       tf.assign(var, value).get_shape())
Project: complex_tf    Author: woodshop
def testAssignNoShapeNoValidateShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.complex64, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       tf.assign(var, value, validate_shape=False).get_shape())
Project: complex_tf    Author: woodshop
def testAssignUpdateNoVarShape(self):
    var = state_ops.variable_op([1, 2], tf.complex64, set_shape=False)
    added = tf.assign_add(var, [[2.0+2.0j, 3.0+3.0j]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0+12.0j, 13.0+13.0j]])
    self.assertEqual([1, 2], subbed.get_shape())
Project: complex_tf    Author: woodshop
def testAssignUpdateNoValueShape(self):
    var = state_ops.variable_op([1, 2], tf.complex64)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], subbed.get_shape())
Project: complex_tf    Author: woodshop
def testAssignUpdateNoShape(self):
    var = state_ops.variable_op([1, 2], tf.complex64, set_shape=False)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
Project: complex_tf    Author: woodshop
def testTemporaryVariable(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="foo")
      var = tf.assign(var, [[4.0+5.0j, 5.0+4.0j]])
      var = tf.assign_add(var, [[6.0+7.0j, 7.0+6.0j]])
      final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
      self.assertAllClose([[10.0+12.0j, 12.0+10.0j]], final.eval())
Project: complex_tf    Author: woodshop
def testDestroyNonexistentTemporaryVariable(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], tf.complex64)
      final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
      with self.assertRaises(errors.NotFoundError):
        final.eval()
Project: complex_tf    Author: woodshop
def testDestroyTemporaryVariableTwice(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], tf.complex64)
      val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      final = val1 + val2
      with self.assertRaises(errors.NotFoundError):
        final.eval()
Project: complex_tf    Author: woodshop
def testTemporaryVariableNoLeak(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="bar")
      final = tf.identity(var)
      final.eval()
Project: complex_tf    Author: woodshop
def testTwoTemporaryVariablesNoLeaks(self):
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="var1")
      var2 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.complex64,
          var_name="var2")
      final = var1 + var2
      final.eval()
Project: complex_tf    Author: woodshop
def testAssignDependencyAcrossDevices(self):
    with self.test_session(use_gpu=True):
      # The variable and an op to increment it are on the GPU.
      var = state_ops.variable_op([1], tf.complex64)
      tf.assign(var, [1.0+2.0j]).eval()
      increment = tf.assign_add(var, [2.0+3.0j])
      with tf.control_dependencies([increment]):
        with tf.device("/cpu:0"):
          # This mul op is pinned to the CPU, but reads the variable from the
          # GPU. The test ensures that the dependency on 'increment' is still
          # honored, i.e., the Send and Recv from GPU to CPU should take place
          # only after the increment.
          result = tf.multiply(var, var)
      self.assertAllClose([-16.0+30.0j], result.eval())
Project: complex_tf    Author: woodshop
def testIsVariableInitialized(self):
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu):
        v0 = state_ops.variable_op([1, 2], tf.complex64)
        self.assertEqual(False, tf.is_variable_initialized(v0).eval())
        tf.assign(v0, [[2.0+3.0j, 3.0+2.0j]]).eval()
        self.assertEqual(True, tf.is_variable_initialized(v0).eval())
Project: Mendelssohn    Author: diggerdu
def istft(spec, overlap=4):
    assert (spec.shape[0] > 1)
    S = tf.placeholder(dtype=tf.complex64, shape=spec.shape)
    X = tf.complex_abs(tf.concat(0, [tf.ifft(frame) \
            for frame in tf.unstack(S)]))
    sess = tf.Session()
    return sess.run(X, feed_dict={S:spec})
Project: neural-decoder    Author: Krastanov
def s_binary_crossentropy(self, y_true, y_pred):
        if self.p:
            y_pred = undo_normcentererr(y_pred, self.p)
            y_true = undo_normcentererr(y_true, self.p)
        s_true = K.dot(y_true, K.transpose(self.H))%2
        twopminusone = 2*y_pred-1
        s_pred = ( 1 - tf.real(K.exp(K.dot(K.log(tf.cast(twopminusone, tf.complex64)), tf.cast(K.transpose(self.H), tf.complex64)))) ) / 2
        return K.mean(K.binary_crossentropy(s_pred, s_true), axis=-1)
Project: tensorflow_compact_bilinear_pooling    Author: ronghanghu
def _SequentialBatchFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        size = tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
            * tf.complex(size, 0.))
    else:
        size = tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
            * tf.complex(size, tf.zeros([], tf.float64)))
Project: tensorflow_compact_bilinear_pooling    Author: ronghanghu
def _SequentialBatchIFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
            * tf.complex(rsize, 0.))
    else:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
            * tf.complex(rsize, tf.zeros([], tf.float64)))
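
The size and 1/size factors in these two gradient functions follow the usual adjoint rule for linear ops: the unnormalized forward DFT and the 1/N-normalized inverse DFT are adjoints of each other up to a factor of N, so backpropagating through an FFT applies an IFFT scaled by N, and vice versa. A minimal NumPy sketch (an illustration under that assumption, not project code) checks the adjoint identity the scaling relies on:

import numpy as np

N = 16
x = np.random.randn(N) + 1j * np.random.randn(N)
g = np.random.randn(N) + 1j * np.random.randn(N)   # stand-in for an upstream gradient

# <fft(x), g> == <x, N * ifft(g)>, i.e. N * ifft is the adjoint of fft
lhs = np.vdot(np.fft.fft(x), g)
rhs = np.vdot(x, N * np.fft.ifft(g))
assert np.allclose(lhs, rhs)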
Project: tacotron    Author: keithito
def _griffin_lim_tensorflow(S):
  '''TensorFlow implementation of Griffin-Lim
  Based on https://github.com/Kyubyong/tensorflow-exercises/blob/master/Audio_Processing.ipynb
  '''
  with tf.variable_scope('griffinlim'):
    # TensorFlow's stft and istft operate on a batch of spectrograms; create batch of size 1
    S = tf.expand_dims(S, 0)
    S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))
    y = _istft_tensorflow(S_complex)
    for i in range(hparams.griffin_lim_iters):
      est = _stft_tensorflow(y)
      angles = est / tf.cast(tf.maximum(1e-8, tf.abs(est)), tf.complex64)
      y = _istft_tensorflow(S_complex * angles)
    return tf.squeeze(y, 0)
Project: complex_tf    Author: woodshop
def testBasic(self):
    for dtype in [tf.complex64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in tf.trainable_variables())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0, so we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
Project: complex_tf    Author: woodshop
def testSharing(self):
    for dtype in [tf.complex64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update1 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        mom_update2 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators were 0, so we should see a normal
        # update: v -= grad * learning_rate
        mom_update1.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the second momentum accumulators contain the previous update.
        mom_update2.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
Project: complex_tf    Author: woodshop
def testWithGlobalStep(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        with tf.device('/cpu'):
          global_step = tf.Variable(0, trainable=False)
        v0 = [1.0+2.0j, 2.0+1.0j]
        v1 = [3.0-4.0j, 4.0-3.0j]
        g0 = [0.1+0.1j, 0.1-0.1j]
        g1 = [0.01-0.01j, 0.01+0.01j]
        lr = 3.0-1.5j 
        var0 = tf.Variable(v0, dtype=dtype)
        var1 = tf.Variable(v1, dtype=dtype)
        grads0 = tf.constant(g0, dtype=dtype)
        grads1 = tf.constant(g1, dtype=dtype)
        sgd_op = ctf.train.CplxGradientDescentOptimizer(lr).apply_gradients(
            zip([grads0, grads1], [var0, var1]),
            global_step=global_step)
        tf.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType(v0, var0.eval())
        self.assertAllCloseAccordingToType(v1, var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params and global_step
        self.assertAllCloseAccordingToType(
            [v0[0] - lr * g0[0],
             v0[1] - lr * g0[1]], var0.eval())
        self.assertAllCloseAccordingToType(
            [v1[0] - lr * g1[0],
             v1[1] - lr * g1[1]], var1.eval())
        self.assertAllCloseAccordingToType(1, global_step.eval())

  ### Currently no support for sparse complex tensors
  # def testSparseBasic(self):
  #   for dtype in [tf.complex64]:
  #     with self.test_session(force_gpu=True):
  #       var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
  #       var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
  #       grads0 = tf.IndexedSlices(
  #           tf.constant([0.1], shape=[1, 1], dtype=dtype),
  #           tf.constant([0]),
  #           tf.constant([2, 1]))
  #       grads1 = tf.IndexedSlices(
  #           tf.constant([0.01], shape=[1, 1], dtype=dtype),
  #           tf.constant([1]),
  #           tf.constant([2, 1]))
  #       sgd_op = ctf.train.CplxGradientDescentOptimizer(3.0).apply_gradients(
  #           zip([grads0, grads1], [var0, var1]))
  #       tf.initialize_all_variables().run()
  #       # Fetch params to validate initial values
  #       self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
  #       self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
  #       # Run 1 step of sgd
  #       sgd_op.run()
  #       # Validate updated params
  #       self.assertAllCloseAccordingToType(
  #           [[1.0 - 3.0 * 0.1], [2.0]], var0.eval())
  #       self.assertAllCloseAccordingToType(
  #           [[3.0], [4.0 - 3.0 * 0.01]], var1.eval())
Project: complex_tf    Author: woodshop
def testSharing(self):
    for dtype in [tf.complex64]:
      with self.test_session(force_gpu=True):
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0-1.0j, 2.0-2.0j], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1+0.1j, 0.1-0.1j], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0+3.0j, 4.0-4.0j], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01-0.01j, 0.01+0.01j],
                             dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)
        opt = tf.train.AdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()

        beta1_power, beta2_power = opt._get_beta_accumulators()

        # Fetch params to validate initial values
        self.assertAllClose([1.0-1.0j, 2.0-2.0j], var0.eval())
        self.assertAllClose([3.0+3.0j, 4.0-4.0j], var1.eval())

        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())