Python tensorflow module: self_adjoint_eig() example source code

We extracted the following 5 code examples from open-source Python projects to illustrate how to use tensorflow.self_adjoint_eig().
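
As a quick orientation (a minimal sketch of ours, not taken from any of the projects below): in TensorFlow 1.x, tf.self_adjoint_eig takes a self-adjoint (symmetric or Hermitian) matrix, or a batch of them, and returns an (eigenvalues, eigenvectors) tuple, with the eigenvalues in non-decreasing order and the eigenvectors as the columns of the second output.

import numpy as np
import tensorflow as tf

# A small symmetric (self-adjoint) matrix.
A = tf.constant([[2.0, 1.0],
                 [1.0, 2.0]])

# e holds the eigenvalues in non-decreasing order; the columns of v are
# the corresponding unit eigenvectors.
e, v = tf.self_adjoint_eig(A)

with tf.Session() as sess:
    eigenvalues, eigenvectors = sess.run([e, v])
    print(eigenvalues)   # approximately [1. 3.]
    print(eigenvectors)  # columns are the matching unit eigenvectors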

Project: reslearn    Author: mackcmillion    | project source | file source
def _covariance_final_ops(sum_squares, total):
    # http://www.johnloomis.org/ece563/notes/covar/covar.html
    # Bessel's correction: divide by N - 1 for an unbiased covariance estimate.
    total = total - tf.constant(1.0)
    # Broadcast the scalar count into a 3x3 matrix matching the RGB covariance.
    total_3x3 = tf.reshape(tf.tile(tf.expand_dims(total, 0), [9]), [3, 3])
    covariance = tf.div(sum_squares, total_3x3)

    # Per-channel variances are the diagonal entries (flat indices 0, 4, 8).
    variance = tf.gather(tf.reshape(covariance, [-1]), [0, 4, 8])

    # eigenvalues and eigenvectors for PCA
    # tf.self_adjoint_eig returns an (eigenvalues, eigenvectors) tuple in TF 1.x;
    # the original slicing here targeted the legacy single-tensor return value.
    eigenvalues, eigenvectors = tf.self_adjoint_eig(covariance)

    return tf.sqrt(variance), eigenvalues, eigenvectors
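
A hypothetical follow-up (not part of the reslearn excerpt): eigenpairs of the RGB covariance are exactly what AlexNet-style PCA color augmentation consumes. A sketch assuming eigenvalues of shape [3] and eigenvectors of shape [3, 3], as returned above:

import tensorflow as tf

def pca_color_jitter(image, eigenvalues, eigenvectors, stddev=0.1):
    # Hypothetical helper, not from the project above.
    # image: float32 tensor of shape [H, W, 3]; eigenvectors are the columns of a [3, 3] matrix.
    alphas = tf.random_normal([3], stddev=stddev)  # per-component jitter strengths
    rgb_shift = tf.matmul(eigenvectors, tf.expand_dims(alphas * eigenvalues, 1))
    return image + tf.reshape(rgb_shift, [3])  # broadcast the RGB shift over H x W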
Project: tfdeploy    Author: riga    | project source | file source
def test_SelfAdjointEigV2(self):
        t = tf.self_adjoint_eig(np.array(3 * [3, 2, 2, 1]).reshape(3, 2, 2).astype("float32"))
        # the order of eigenvalues/eigenvectors may differ between tf and np, so only
        # compare the sum and mean; also, different numerical algorithms are used, so
        # account for the difference in precision by comparing to 4 digits
        self.check(t, ndigits=4, stats=True, abs=True)
Project: style_transfer    Author: VinceMarron    | project source | file source
def describe_style(self, style_image, eval_out=False, pool_type='avg', last_layer='conv5_4'):
    """ Runs the 'style_image' through the vgg network and extracts a statistical
    description of the activations at convolution layers

    Args:
        style_image (PIL image object): the image whose style will be transferred
        eval_out (bool): whether to open a tf session and eval the style description to a np array
        pool_type (str): 'avg', 'max', or 'none', type of pooling to use
        last_layer (str): vgg network will process image up to this layer

    """
    with self.graph.as_default():

      self.style_desc = {}
      self.style_arr = tf.constant((np.expand_dims(style_image,0)[:,:,:,:3])
                                   .astype('float32'))

      x = self.style_arr-self.mean_pixel

      self.stop = self.all_layers.index(last_layer)+1

      for i, layer in enumerate(self.all_layers[:self.stop]):

        if layer[:2] == 're': x = tf.nn.relu(x)

        elif layer[:2] == 'po': x = self.pool_func(x, pool_type)

        elif layer[:2] == 'co':
          kernel = self.vgg_ph[layer+'_kernel']
          bias = self.vgg_ph[layer+'_bias']

          x = tf.nn.bias_add(tf.nn.conv2d(x, kernel,
                                          strides=(1, 1, 1, 1),
                                          padding='SAME'),bias)

          layer_shape = tf.shape(x, out_type=tf.int32)

          # flattens the image tensor to (#pixels x #channels); assumes batch=1,
          # treats each pixel as an observation of a Gaussian random vector
          # in R^(#channels), and infers its parameters
          stl_activs = tf.reshape(x, [layer_shape[1]*layer_shape[2], layer_shape[3]])
          mean_stl_activs = tf.reduce_mean(stl_activs, axis=0, keep_dims=True)
          covar_stl_activs = (tf.matmul(stl_activs - mean_stl_activs,
                                        stl_activs - mean_stl_activs, transpose_a=True)/
                              tf.cast(layer_shape[1]*layer_shape[2], tf.float32))

          # takes the matrix square root of covar_stl_activs
          # (necessary for wdist, as tf cannot take the eig of non-symmetric matrices)
          eigvals,eigvects = tf.self_adjoint_eig(covar_stl_activs)
          eigval_mat = tf.diag(tf.sqrt(tf.maximum(eigvals,0.)))
          root_covar_stl_activs = tf.matmul(tf.matmul(eigvects, eigval_mat)
                                                ,eigvects,transpose_b=True)

          trace_covar_stl = tf.reduce_sum(tf.maximum(eigvals,0))

          self.style_desc[layer] = (mean_stl_activs,
                                    trace_covar_stl,
                                    root_covar_stl_activs)
      if eval_out:
        with tf.Session(graph=self.graph, config=self.config) as sess:
          self.style_desc = sess.run(self.style_desc, feed_dict=self.feed_dict)
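
The conv branch above takes a symmetric matrix square root via eigendecomposition, clamping eigenvalues at zero to absorb numerical error. The same technique in isolation (a minimal sketch with illustrative names):

import tensorflow as tf

def symmetric_sqrt(mat):
    # mat: a symmetric positive semi-definite [n, n] matrix.
    # Since mat = V diag(e) V^T, its square root is V diag(sqrt(e)) V^T.
    e, v = tf.self_adjoint_eig(mat)
    e = tf.maximum(e, 0.)  # clamp small negative eigenvalues from round-off
    return tf.matmul(tf.matmul(v, tf.diag(tf.sqrt(e))), v, transpose_b=True)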
Project: MGP-RNN    Author: jfutoma    | project source | file source
def Lanczos(Sigma_func,b):
    """ Lanczos method to approximate Sigma^1/2 * b, with b random vec

    Note: this only gives you a single draw though, which may not be ideal.

    Inputs:
        Sigma_func: function to premultiply a vector by Sigma, which you 
            might not want to explicitly construct if it's huge.
        b: random vector of i.i.d. N(0,1) entries

    Returns:
        random vector approximately equal to Sigma^1/2 * b
    """
    n = tf.shape(b)[0]
    k = tf.div(n,500) + 3 #this many Lanczos iterations

    betas = tf.zeros(1)
    alphas = tf.zeros(0)
    D = tf.zeros((n,1))

    b_norm = tf.norm(b)
    D = tf.concat([D,tf.reshape(b/b_norm,[-1,1])],1)

    def cond(j,alphas,betas,D):
        return j < k+1

    def body(j,alphas,betas,D):     
        d_j = tf.slice(D,[0,j],[-1,1])
        d = Sigma_func(d_j) - tf.slice(betas,[j-1],[1])*tf.slice(D,[0,j-1],[-1,1]) 
        alphas = tf.concat([alphas,[dot(d_j,d)]],0)  # `dot` is a helper defined elsewhere in the project
        d = d - tf.slice(alphas,[j-1],[1])*d_j
        betas = tf.concat([betas,[tf.norm(d)]],0)
        D = tf.concat([D,d/tf.slice(betas,[j],[1])],1)
        return j+1,alphas,betas,D

    j = tf.constant(1)
    j,alphas,betas,D = tf.while_loop(cond,body,loop_vars=[j,alphas,betas,D],
        shape_invariants=[j.get_shape(),tf.TensorShape([None]),
                          tf.TensorShape([None]),tf.TensorShape([None,None])])

    betas_ = tf.diag(tf.slice(betas,[1],[k-1]))
    D_ = tf.slice(D,[0,1],[-1,k])

    #build out tridiagonal H: alphas_1:k on main, betas_2:k on off 
    H = tf.diag(alphas) + tf.pad(betas_,[[1,0],[0,1]]) + tf.pad(betas_,[[0,1],[1,0]])

    e,v = tf.self_adjoint_eig(H)
    e_pos = tf.maximum(0.0,e)+1e-6 #make sure positive definite 
    e_sqrt = tf.diag(tf.sqrt(e_pos))
    sq_H = tf.matmul(v,tf.matmul(e_sqrt,tf.transpose(v)))

    out = b_norm*tf.matmul(D_,sq_H) 
    return tf.slice(out,[0,0],[-1,1]) # grab the first column, i.e. the result of multiplying by e_1
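
A hedged usage sketch (the explicit Sigma below is illustrative only; the point of Lanczos is that Sigma_func can be a matrix-free operator):

import numpy as np
import tensorflow as tf

n = 1000
L = tf.constant((np.random.randn(n, n) / np.sqrt(n)).astype('float32'))
Sigma = tf.matmul(L, L, transpose_b=True) + tf.eye(n)  # a positive-definite covariance

b = tf.random_normal([n, 1])
draw = Lanczos(lambda v: tf.matmul(Sigma, v), b)  # approximately Sigma^1/2 * b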
Project: MGP-RNN    Author: jfutoma    | project source | file source
def block_Lanczos(Sigma_func,B_,n_mc_smps):
    """
    block Lanczos method to approx Sigma^1/2 * B, with B matrix of N(0,1)'s.
    Used to generate multiple approximate large normal draws.

    """
    n = tf.shape(B_)[0] #dim of the multivariate normal
    s = n_mc_smps #number of samples to draw
    k = tf.div(n,500) + 3 #number of Lanczos iterations

    betas = tf.zeros([1,s])
    alphas = tf.zeros([0,s])
    D = tf.zeros([s,n,1])
    B_norms = tf.norm(B_,axis=0)
    D = tf.concat([D,tf.expand_dims(tf.transpose(B_/B_norms),2)],2)

    def cond(j,alphas,betas,D):
        return j < k+1

    #TODO: use block-CG in place of Sigma
    def body(j,alphas,betas,D):  
        d_j = tf.squeeze(tf.slice(D,[0,0,j],[-1,-1,1]))
        d = Sigma_func(tf.transpose(d_j)) - (tf.slice(betas,[j-1,0],[1,-1])*
                tf.transpose(tf.squeeze(tf.slice(D,[0,0,j-1],[-1,-1,1]))))
        alphas = tf.concat([alphas,[tf.diag_part(tf.matmul(d_j,d))]],0)
        d = d - tf.slice(alphas,[j-1,0],[1,-1])*tf.transpose(d_j)
        betas = tf.concat([betas,[tf.norm(d,axis=0)]],0)
        D = tf.concat([D,tf.expand_dims(tf.transpose(d/tf.slice(betas,[j,0],[1,-1])),2)],2)
        return j+1,alphas,betas,D

    j = tf.constant(1)
    j,alphas,betas,D = tf.while_loop(cond,body,loop_vars=[j,alphas,betas,D],
        shape_invariants=[j.get_shape(),tf.TensorShape([None,None]),
                          tf.TensorShape([None,None]),tf.TensorShape([None,None,None])])

    D_ = tf.slice(D,[0,0,1],[-1,-1,k])

    ##TODO: replace loop
    H = tf.zeros([0,k,k])

    for ss in range(s):
        this_beta = tf.diag(tf.squeeze(tf.slice(betas,[1,ss],[k-1,1])))
        #build out tridiagonal H: alphas_1:k on main, betas_2:k on off 
        this_H = (tf.diag(tf.squeeze(tf.slice(alphas,[0,ss],[-1,1]))) +
                  tf.pad(this_beta,[[1,0],[0,1]]) +
                   tf.pad(this_beta,[[0,1],[1,0]]))
        H = tf.concat([H,tf.expand_dims(this_H,0)],0)    

    E,V = tf.self_adjoint_eig(H)
    E_sqrt = tf.zeros([0,k,k])
    #TODO: replace loop
    for ss in range(s): 
        #ensure positive definite
        E_sqrt = tf.concat([E_sqrt,tf.expand_dims(tf.diag(tf.squeeze(tf.sqrt(tf.maximum(tf.slice(E,[ss,0],[1,-1]),1e-6)))),0)],0)
    sq_H = tf.matmul(V,tf.matmul(E_sqrt,tf.transpose(V,perm=[0,2,1])))

    e1 = tf.expand_dims(tf.transpose(tf.tile(tf.slice(tf.eye(k),[0,0],[-1,1]),[1,s])),2)
    out = B_norms*tf.transpose(tf.squeeze(tf.matmul(D_,tf.matmul(sq_H,e1))))
    return out
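
A matching usage sketch for the block version (again with an illustrative explicit Sigma):

import numpy as np
import tensorflow as tf

n, s = 1000, 4
L = tf.constant((np.random.randn(n, n) / np.sqrt(n)).astype('float32'))
Sigma = tf.matmul(L, L, transpose_b=True) + tf.eye(n)  # a positive-definite covariance

B = tf.random_normal([n, s])  # one N(0,1) column per desired draw
draws = block_Lanczos(lambda V: tf.matmul(Sigma, V), B, s)
# draws has shape [n, s]; each column approximates Sigma^1/2 times the matching column of B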