Python tensorflow module: imag() example source code

We extracted the following 17 code examples from open-source Python projects to illustrate how to use tensorflow.imag().
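
A minimal sketch of what tf.imag computes, assuming TensorFlow 1.x (where the op lives at the top level; in TensorFlow 2 it is tf.math.imag):

import tensorflow as tf

# tf.imag extracts the imaginary part of a complex tensor as a real tensor.
z = tf.complex([1.0, 2.0], [3.0, -4.0])  # complex64, shape [2]
im = tf.imag(z)                          # float32, evaluates to [3.0, -4.0]

with tf.Session() as sess:
    print(sess.run(im))  # [ 3. -4.]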

Project: tensorflow_with_latest_papers    Author: NickShahML    | project source | file source
def __call__(self, inputs, state, scope=None):
        zero_initer = tf.constant_initializer(0.)
        with tf.variable_scope(scope or type(self).__name__):

            # Two real matmuls map the real-valued inputs to the complex state:
            # the first state_size columns of the projection become the real part,
            # the remaining columns the imaginary part. The same trick could be
            # extended to LSTM configurations.
            mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])

            in_proj = tf.matmul(inputs, mat_in)
            in_proj_c = tf.complex(in_proj[:, :self.state_size], in_proj[:, self.state_size:])
            out_state = modrelu_c(
                in_proj_c + ulinear_c(state, transform=self.transform),
                tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size], initializer=zero_initer))
            out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size], initializer=zero_initer)
            # TF 1.x concat takes the values first, then the axis
            out = tf.matmul(tf.concat([tf.real(out_state), tf.imag(out_state)], 1), mat_out) + out_bias
        return out, out_state
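
modrelu_c and ulinear_c are helpers defined elsewhere in this repository. A plausible sketch of modrelu_c, consistent with the modReLU formula used in the urnn examples further down (relu(|z| + b) * z / |z|), might look like this:

def modrelu_c(z, bias):
    # hypothetical reconstruction; the repository's own definition may differ
    norm = tf.abs(z)
    scale = tf.nn.relu(norm + bias) / (norm + 1e-6)
    return tf.complex(tf.real(z) * scale, tf.imag(z) * scale)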
Project: MobileNet    Author: Zehaos    | project source | file source
def get_mu_tensor(self):
    # solve the cubic -x^3 + 3x^2 - (3 + const_fact)x + 1 = 0 for the momentum mu
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact))
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)

    # keep only the valid root: real part in (0, 1) and negligible imaginary part
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0)),
      tf.less(tf.real(roots), tf.constant(1.0))), tf.less(tf.abs(tf.imag(roots)), 1e-5))
    # in case there are two duplicated roots satisfying the above condition, take the first
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx)), tf.constant(0)), shape=[])
    tf.assert_equal(tf.size(root), tf.constant(1))

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
    return mu
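
To see which root the filter above selects, here is a NumPy-only sketch of the same cubic; the const_fact value is made up for illustration:

import numpy as np

const_fact = 0.5  # illustrative only; the real value comes from running averages
coef = np.array([-1.0, 3.0, -(3.0 + const_fact), 1.0])
roots = np.roots(coef)
# mirror the tf filter above: real part in (0, 1), negligible imaginary part
valid = roots[(roots.real > 0.0) & (roots.real < 1.0) & (np.abs(roots.imag) < 1e-5)]
print(valid.real)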
Project: source_separation_ml_jeju    Author: hjkwon0609    | project source | file source
def create_spectrogram_from_audio(data):
    spectrogram = librosa.stft(data, n_fft=Config.n_fft, hop_length=Config.hop_length).transpose()

    # The real and imaginary components of each element are split apart and the
    # two resulting matrices concatenated before serialization, because saving
    # complex numbers in TFRecords raises a DataCorruptionError; see
    # spectrogram_split_real_imag below.
    # concatenated = np.concatenate([np.real(spectrogram), np.imag(spectrogram)], axis=1)
    return spectrogram  # [num_time_frames, num_freq_bins]
Project: source_separation_ml_jeju    Author: hjkwon0609    | project source | file source
def spectrogram_split_real_imag(spec):
    return np.concatenate([np.real(spec), np.imag(spec)], axis=1)
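
The inverse operation is not part of this excerpt; a hypothetical NumPy sketch of rejoining the two halves would be:

def spectrogram_join_real_imag(split_spec):
    # hypothetical inverse of the helper above: the first half of the columns
    # holds the real parts, the second half the imaginary parts
    num_bins = split_spec.shape[1] // 2
    return split_spec[:, :num_bins] + 1j * split_spec[:, num_bins:]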
Project: source_separation_ml_jeju    Author: hjkwon0609    | project source | file source
def get_phase(spec):
    return tf.imag(tf.log(spec))  # arg(z) = Im(log z), the phase angle
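
This works because log(z) = ln|z| + i*arg(z), so the imaginary part of the complex logarithm is exactly the phase angle. An equivalent formulation, assuming a TensorFlow version that provides tf.atan2, would be:

def get_phase_atan2(spec):
    # equivalent sketch: arg(z) = atan2(Im(z), Re(z))
    return tf.atan2(tf.imag(spec), tf.real(spec))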

############################################################################
##  Vector Product Functions
############################################################################
Project: urnn    Author: Rand0mUsername    | project source | file source
def normalize(z):
    # scale z to (approximately) unit L2 norm; the epsilon guards against division by zero
    norm = tf.sqrt(tf.reduce_sum(tf.abs(z)**2))
    factor = (norm + 1e-6)
    return tf.complex(tf.real(z) / factor, tf.imag(z) / factor)
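
A quick usage sketch: for z = [3, 4i] the L2 norm is 5, so the normalized result has (approximately) unit norm:

z = tf.complex(tf.constant([3.0, 0.0]), tf.constant([0.0, 4.0]))
z_n = normalize(z)
# tf.sqrt(tf.reduce_sum(tf.abs(z_n)**2)) evaluates to roughly 1.0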

# z: complex[batch_sz, num_units]
# bias: real[num_units]
Project: urnn    Author: Rand0mUsername    | project source | file source
def modReLU(z, bias): # relu(|z|+b) * (z / |z|)
    norm = tf.abs(z)
    scale = tf.nn.relu(norm + bias) / (norm + 1e-6)
    scaled = tf.complex(tf.real(z)*scale, tf.imag(z)*scale)
    return scaled

###################################################################################################

# 4k / 7k trainable params
Project: holographic_memory    Author: jramapuram    | project source | file source
def bound(x):
        # clamp the magnitude of x to at most 1 by dividing by max(|x|, 1)
        bound = tf.maximum(tf.sqrt(tf.multiply(tf.real(x), tf.real(x))
                                   + tf.multiply(tf.imag(x), tf.imag(x))),
                           1.0)
        return tf.complex(tf.real(x) / bound, tf.imag(x) / bound)
Project: holographic_memory    Author: jramapuram    | project source | file source
def unsplit_from_complex_ri(x):
        return tf.concat([tf.real(x), tf.imag(x)], 1)
Project: holographic_memory    Author: jramapuram    | project source | file source
def unsplit_from_complex_ir(x):
        #return tf.concat([tf.imag(x), tf.abs(tf.real(x))], 1)
        return tf.abs(tf.concat([tf.imag(x), tf.real(x)], 1))

        #mag = tf.maximum(1.0, tf.abs(x))
        #x = tf.complex(tf.real(x) / (mag + 1e-10), tf.imag(x) / (mag + 1e-10))

        # real = tf.concat([tf.imag(x), tf.real(x)], 1)
        # return tf.abs(HolographicMemory.normalize_real_by_complex_abs([real])[0])
Project: factorix    Author: gbouchar    | project source | file source
def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'real':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
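
A hypothetical usage sketch (the embedding values and index pairs are illustrative, not from the project):

emb = tf.complex(tf.random_normal([5, 4]), tf.random_normal([5, 4]))  # 5 complex embeddings of rank 4
tuples = tf.constant([[0, 1], [2, 3], [1, 4]])                        # 3 index pairs to score
scores = sparse_dot_product0(emb, tuples, use_matmul=False, output_type='real')  # shape [3]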
Project: tensorflow_with_latest_papers    Author: NickShahML    | project source | file source
def abs2_c(z):
    # |z|^2 = Re(z)^2 + Im(z)^2, computed without the sqrt/abs round trip
    return tf.real(z)*tf.real(z) + tf.imag(z)*tf.imag(z)
Project: tensorflow_with_latest_papers    Author: NickShahML    | project source | file source
def complex_mul_real( z, r ):
    # scale a complex tensor z elementwise by a real tensor r
    return tf.complex(tf.real(z)*r, tf.imag(z)*r)
Project: tensorflow_with_latest_papers    Author: NickShahML    | project source | file source
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            # the packed state holds the unitary (complex) part and a secondary real part
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(state, 2, 1)

            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)
            # interpret the two halves of the projection as real and imaginary parts
            in_proj_c = tf.complex(*tf.split(in_proj, 2, 1))
            out_state = modReLU(
                in_proj_c + ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias', dtype=tf.float32, shape=[self.state_size],
                                initializer=tf.constant_initializer(0.)),
                scope=scope)

        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            # hidden_bias is defined elsewhere in the original source
            modulus = tf.abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # transition to data shortcut connection
        out_bias = tf.get_variable(name='out_bias', dtype=tf.float32, shape=[self.output_size],
                                   initializer=tf.constant_initializer(0.))
        out_ = tf.matmul(tf.concat([tf.real(out_state), tf.imag(out_state)], 1), mat_out) + out_bias

        # hidden state is complex but the output is completely real
        return out_, out_state
Project: tfdeploy    Author: riga    | project source | file source
def test_Imag(self):
        t = tf.imag(tf.Variable(self.random(3, 4, complex=True)))
        self.check(t)
Project: urnn    Author: Rand0mUsername    | project source | file source
def call(self, inputs, state):
        """The most basic URNN cell.
        Args:
            inputs (Tensor - batch_sz x num_in): One batch of cell input.
            state (Tensor - batch_sz x num_units): Previous cell state: COMPLEX
        Returns:
        A tuple (outputs, state):
            outputs (Tensor - batch_sz x num_units*2): Cell outputs on the whole batch.
            state (Tensor - batch_sz x num_units): New state of the cell.
        """
        #print("cell.call inputs:", inputs.shape, inputs.dtype)
        #print("cell.call state:", state.shape, state.dtype)

        # prepare input linear combination
        inputs_mul = tf.matmul(inputs, tf.transpose(self.w_ih)) # [batch_sz, 2*num_units]
        inputs_mul_c = tf.complex( inputs_mul[:, :self._num_units], 
                                   inputs_mul[:, self._num_units:] ) 
        # [batch_sz, num_units]

        # prepare state linear combination (always complex!)
        state_c = tf.complex( state[:, :self._num_units], 
                              state[:, self._num_units:] ) 

        state_mul = self.D1.mul(state_c)
        state_mul = FFT(state_mul)
        state_mul = self.R1.mul(state_mul)
        state_mul = self.P.mul(state_mul)
        state_mul = self.D2.mul(state_mul)
        state_mul = IFFT(state_mul)
        state_mul = self.R2.mul(state_mul)
        state_mul = self.D3.mul(state_mul) 
        # [batch_sz, num_units]

        # calculate preactivation
        preact = inputs_mul_c + state_mul
        # [batch_sz, num_units]

        new_state_c = modReLU(preact, self.b_h) # [batch_sz, num_units] C
        new_state = tf.concat([tf.real(new_state_c), tf.imag(new_state_c)], 1) # [batch_sz, 2*num_units] R
        # outside network (last dense layer) is ready for 2*num_units -> num_out
        output = new_state
        # print("cell.call output:", output.shape, output.dtype)
        # print("cell.call new_state:", new_state.shape, new_state.dtype)

        return output, new_state
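
To illustrate the packed state layout described in the docstring, a small sketch (num_units and the values are illustrative):

num_units = 3
state = tf.constant([[1., 2., 3., 4., 5., 6.]])                   # [batch_sz=1, 2*num_units]
state_c = tf.complex(state[:, :num_units], state[:, num_units:])  # complex, [1, num_units]
state_back = tf.concat([tf.real(state_c), tf.imag(state_c)], 1)   # round-trips back to `state`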
Project: MachineLearning    Author: timomernick    | project source | file source
def create_network():
    dp = tflearn.data_preprocessing.DataPreprocessing()
    dp.add_featurewise_zero_center()
    dp.add_featurewise_stdnorm()
    #dp.add_samplewise_zero_center()
    #dp.add_samplewise_stdnorm()

    network = tflearn.input_data(shape=[None, chunk_size])#, data_preprocessing=dp)

    # input is a real signal
    network = tf.complex(network, 0.0)

    # fft the input
    input_fft = tf.fft(network)
    input_orig_fft = input_fft
    input_fft = tf.stack([tf.real(input_fft), tf.imag(input_fft)], axis=2)
    fft_size = int(input_fft.shape[1])
    network = input_fft
    print("fft shape: " + str(input_fft.get_shape()))

    omg = fft_size

    nn_reg = None

    mask = network

    mask = tflearn.layers.fully_connected(mask, omg*2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    mask = tflearn.layers.fully_connected(mask, omg, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    # layer sizes must be integers, so use floor division on omg
    mask = tflearn.layers.fully_connected(mask, omg//2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    #mask = tflearn.layers.fully_connected(mask, omg//4, activation="tanh")
    mask = tflearn.reshape(mask, [-1, 1, omg//2])
    mask = tflearn.layers.recurrent.lstm(mask, omg//4)

    mask = tflearn.layers.fully_connected(mask, omg//2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    mask = tflearn.layers.fully_connected(mask, omg, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    mask = tflearn.layers.fully_connected(mask, omg*2, activation="tanh", regularizer=nn_reg)
    mask = tflearn.layers.normalization.batch_normalization(mask)

    mask = tflearn.layers.fully_connected(mask, omg, activation="sigmoid", regularizer=nn_reg)

    # apply the learned real-valued mask to both FFT components, then invert back to a real signal
    real = tf.multiply(tf.real(input_orig_fft), mask)
    imag = tf.multiply(tf.imag(input_orig_fft), mask)
    network = tf.real(tf.ifft(tf.complex(real, imag)))

    print("final shape: " + str(network.get_shape()))

    network = tflearn.regression(network, optimizer="adam", learning_rate=learning_rate, loss="mean_square")

    return network