Python theano.tensor 模块,tensor4() 实例源码

我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用theano.tensor.tensor4()。

项目:discgen    作者:vdumoulin    | 项目源码 | 文件源码
def create_training_computation_graphs():
    """Build two Blocks computation graphs for training.

    Returns:
        (cg, bn_dropout_cg): the plain ComputationGraph over [cost,
        accuracy], and a variant that uses batch statistics for batch
        normalization plus 0.5 dropout on selected brick outputs.
    """
    x = tensor.tensor4('features')
    y = tensor.imatrix('targets')

    convnet, mlp = create_model_bricks()
    y_hat = mlp.apply(convnet.apply(x).flatten(ndim=2))
    cost = BinaryCrossEntropy().apply(y, y_hat)
    # Accuracy = fraction of thresholded predictions matching targets.
    accuracy = 1 - tensor.neq(y > 0.5, y_hat > 0.5).mean()
    cg = ComputationGraph([cost, accuracy])

    # Create a graph which uses batch statistics for batch normalization
    # as well as dropout on selected variables
    bn_cg = apply_batch_normalization(cg)
    # NOTE(review): indices (5, 11, 17) presumably select particular conv
    # layers -- confirm against create_model_bricks().
    bricks_to_drop = ([convnet.layers[i] for i in (5, 11, 17)] +
                      [mlp.application_methods[1].brick])
    variables_to_drop = VariableFilter(
        roles=[OUTPUT], bricks=bricks_to_drop)(bn_cg.variables)
    bn_dropout_cg = apply_dropout(bn_cg, variables_to_drop, 0.5)

    return cg, bn_dropout_cg
项目:deep-prior    作者:moberweger    | 项目源码 | 文件源码
def __init__(self, rng, inputVar=None, cfgParams=None):
        """Construct the network.

        :param rng: numpy random state used for initialization
        :param inputVar: a theano variable, the name for a new tensor4,
            or None for a default tensor4 named 'x'
        :type cfgParams: DescriptorNetParams
        """
        if cfgParams is None:
            raise Exception("Cannot create a Net without config parameters (ie. cfgParams==None)")

        # Accept a ready-made variable, a variable name, or nothing at all.
        if isinstance(inputVar, str):
            inputVar = T.tensor4(inputVar)  # input variable
        elif inputVar is None:
            inputVar = T.tensor4('x')  # input variable

        # delegate structure creation to the base class
        super(PoseRegNet, self).__init__(rng, inputVar, cfgParams)
项目:structured-output-ae    作者:sbelharbi    | 项目源码 | 文件源码
def get_eval_fn(model, in3D=False, use_dice=False):
    """Compile the evaluation function of the model.

    :param model: network exposing .x, .trg and .output theano variables
    :param in3D: if True, the input placeholder is a tensor4; otherwise a
        float matrix
    :param use_dice: use a Dice-based error instead of mean squared error
    :return: compiled theano function mapping (x, y) -> [error, output]
    """
    if use_dice:
        # Dice: 2*|A.B| / (|A| + |B|); error = 1 - dice, batch-averaged.
        # NOTE(review): no epsilon in the denominator -- an all-zero
        # target + output pair would divide by zero; confirm inputs.
        insec = T.sum(model.trg * model.output, axis=1)
        tmp = 1 - 2.0 * insec/(T.sum(model.trg, axis=1) + T.sum(model.output,
                               axis=1))
        error = T.mean(tmp)
    else:
        # Per-sample MSE, then averaged over the batch.
        error = T.mean(T.mean(T.power(model.output - model.trg, 2), axis=1))
    if in3D:
        x = T.tensor4('x')
    else:
        x = T.fmatrix("x")
    y = T.fmatrix("y")

    theano_arg_vl = [x, y]
    output_fn_vl = [error, model.output]

    # Feed the fresh placeholders into the model's own input variables.
    eval_fn = theano.function(
        theano_arg_vl, output_fn_vl,
        givens={model.x: x,
                model.trg: y})

    return eval_fn
项目:reinforcement_learning    作者:andreweskeclarke    | 项目源码 | 文件源码
def compile(self):
        """Compile the training and prediction theano functions.

        Returns self so the call can be chained.
        """
        # Training function: squared-error cost minimized via SGD updates.
        x_train = T.tensor4('x_train')
        actions_train = T.matrix('actions_train')
        y_train = T.matrix('y_train')
        cost_function = self.squared_error(x_train, actions_train, y_train)
        train_inputs = [x_train, actions_train, y_train]
        self.train_function = theano.function(
            train_inputs,
            cost_function,
            updates=self.sgd(cost_function, self.params),
            on_unused_input='ignore',
            allow_input_downcast=True)

        # Prediction function: forward pass only, no parameter updates.
        x_pred = T.tensor3('x_pred')
        actions_pred = T.vector('actions_pred')
        output_function = self.output(x_pred, actions_pred)
        self.predict_function = theano.function(
            [x_pred, actions_pred],
            output_function,
            on_unused_input='ignore',
            allow_input_downcast=True)
        return self
项目:sesame-paste-noodle    作者:aissehust    | 项目源码 | 文件源码
def test_conv2d_dropconnect(self):
        """With dc=0.5, roughly half of the conv weights should be zeroed."""
        layer = Conv2d(filter_size=(3,3), feature_map_multiplier=20, dc=0.5)
        data = np.asarray(rng.uniform(low=-1, high=1, size=(500, 1 ,28, 28)))

        size = layer.forwardSize([(500, 1 ,28, 28)])
        input_x = T.tensor4()
        y = layer.forward([input_x,])[0]

        # Flatten the weight tensor and count exact zeros.
        weights = layer.w.eval()
        shape = weights.shape
        total = shape[0]*shape[1]*shape[2]*shape[3]
        flat = weights.reshape(total)
        zeros = sum(1 for idx in range(total) if abs(flat[idx]) == 0)
        # The observed drop fraction should be close to the configured rate.
        self.assertTrue(abs(round(zeros/total,1)-layer.dc) < 0.2)
项目:reseg    作者:fvisin    | 项目源码 | 文件源码
def local_mean_subtraction(input, kernel_size=5):
    """Subtract the local mean (over a kernel_size square) from each pixel.

    :param input: 3D numpy array, assumed (batch, height, width) -- TODO
        confirm with callers
    :param kernel_size: side length of the square averaging filter
    :return: numpy array with shape (batch, 1, height, width) and the
        local mean removed
    """
    # Insert a singleton channel axis so conv2d sees (batch, 1, h, w).
    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)

    X = T.tensor4(dtype=floatX)
    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = mean_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)

    # 'half' padding keeps the output spatially aligned with the input.
    mean = conv2d(input=X,
                  filters=filters,
                  input_shape=input.shape,
                  filter_shape=filter_shape,
                  border_mode='half')
    new_X = X - mean
    f = function([X], new_X)
    return f(input)
项目:pl-cnn    作者:oval-group    | 项目源码 | 文件源码
def compile_maxpool(output_shape, pool_size):
    """Compile a function comparing theano's pool_2d with my_pool_2d.

    Returns a theano function X -> [out1, out2, grad1, grad2] containing
    the forward outputs and input gradients of both implementations,
    computed with the same random incoming gradient.
    """
    X = T.tensor4()

    # compute output with both methods
    out1 = T.signal.pool.pool_2d(X, pool_size,
                                 ignore_border=True, st=None,
                                 padding=(0, 0), mode='max')

    out2 = my_pool_2d(X, pool_size,
                      ignore_border=True, st=None,
                      padding=(0, 0), mode='max')

    # compute gradient with random incoming gradient for both cases
    incoming_grad = T.as_tensor_variable(np.random.random(size=output_shape)
                                         .astype(np.float32))
    grad1 = T.grad(None, wrt=X, known_grads={out1: incoming_grad})
    grad2 = T.grad(None, wrt=X, known_grads={out2: incoming_grad})

    return theano.function([X], [out1, out2, grad1, grad2])
项目:pl-cnn    作者:oval-group    | 项目源码 | 文件源码
def input_batch(layer):
    """Compile a function caching a layer's (deterministic) input.

    The returned theano function takes (idx, X) and stores the input of
    `layer` for batch X into slot `idx` of layer.X_layer.
    """
    batch_idx = T.iscalar()
    data = T.tensor4()

    inp = lasagne.layers.get_output(layer.input_layer, data,
                                    deterministic=True)
    # Flatten feature maps when the layer expects lower-rank input.
    if inp.ndim > layer.inp_ndim:
        inp = inp.flatten(2)

    n_samples = data.shape[0]
    cached = T.set_subtensor(layer.X_layer[batch_idx, :n_samples, :], inp)

    return theano.function([batch_idx, data],
                           updates=[(layer.X_layer, cached)])
项目:pl-cnn    作者:oval-group    | 项目源码 | 文件源码
def compile_update_svm(nnet, layer):
    """Compile the parameter-update function for an SVM layer when data
    is NOT stored on the GPU (inputs passed per call)."""
    assert layer.issvm and not Cfg.store_on_gpu

    data = T.tensor4()
    labels = T.ivector()

    # Feed the raw batch through the layers below and flatten if needed.
    feats = layer.get_input_for(data)
    feats = feats.flatten(2) if feats.ndim > 2 else feats

    dW, db, loss = grad_svm(nnet, layer, feats, labels)

    updates = _update_cps(nnet=nnet, layer=layer,
                          X=feats, dW=dW, db=db, loss=loss)

    return theano.function([data, labels],
                           updates=updates,
                           profile=nnet.profile)
项目:pl-cnn    作者:oval-group    | 项目源码 | 文件源码
def compile_update_conv(nnet, layer):
    """Compile the parameter-update function for a convolutional layer
    when data IS stored on the GPU (batches addressed by index)."""
    assert layer.isconv and Configuration.store_on_gpu

    data = T.tensor4("X")
    labels = T.ivector("y")
    batch = T.iscalar("idx")

    dW, db, loss = grad_conv(nnet=nnet, layer=layer, X=data, y=labels)

    updates = _update_std(nnet=nnet, layer=layer,
                          dW=dW, db=db, loss=loss,
                          idx=batch)

    return theano.function([batch, data, labels],
                           updates=updates,
                           profile=nnet.profile)
项目:3D-R2N2    作者:chrischoy    | 项目源码 | 文件源码
def __init__(self, random_seed=dt.datetime.now().microsecond, compute_grad=True):
        """Base network: read hyper-parameters from cfg, build symbolic
        inputs and empty bookkeeping lists, then call setup().

        :param random_seed: seed for numpy's RNG. NOTE(review): the
            default is evaluated once at import time, so all calls that
            omit it share the same seed -- confirm this is intended.
        :param compute_grad: whether gradients should be built in setup()
        """
        self.rng = np.random.RandomState(random_seed)

        self.batch_size = cfg.CONST.BATCH_SIZE
        self.img_w = cfg.CONST.IMG_W
        self.img_h = cfg.CONST.IMG_H
        self.n_vox = cfg.CONST.N_VOX
        self.compute_grad = compute_grad

        # (self.batch_size, 3, self.img_h, self.img_w),
        # override x and is_x_tensor4 when using multi-view network
        self.x = tensor.tensor4()
        self.is_x_tensor4 = True

        # (self.batch_size, self.n_vox, 2, self.n_vox, self.n_vox),
        self.y = tensor5()

        self.activations = []  # list of all intermediate activations
        self.loss = []  # final loss
        self.output = []  # final output
        self.error = []  # final output error
        self.params = []  # all learnable params
        self.grads = []  # will be filled out automatically
        self.setup()
项目:Theano-MPI    作者:uoguelph-mlrg    | 项目源码 | 文件源码
def build_model(self):
        """Build the GAN generator and critic plus the symbolic critic
        outputs for real and generated (fake) data."""
        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var)
        critic = build_critic(self.input_var)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))

        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
项目:Theano-MPI    作者:uoguelph-mlrg    | 项目源码 | 文件源码
def build_model(self):
        """Build the GAN generator and critic (with verbosity flag) plus
        the symbolic critic outputs for real and fake data."""
        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var,self.verbose)
        critic = build_critic(self.input_var,self.verbose)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))

        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
项目:Theano-MPI    作者:uoguelph-mlrg    | 项目源码 | 文件源码
def build_model(self):
        """Build the GAN generator and critic plus the symbolic critic
        outputs for real and generated (fake) data."""
        rng=np.random.RandomState(1234)
        lasagne.random.set_rng(rng)

        # Prepare Theano variables for inputs and targets
        self.noise_var = T.matrix('noise')
        self.input_var = T.tensor4('inputs')

        # Create neural network model
        generator = build_generator(self.noise_var)
        critic = build_critic(self.input_var)

        # Create expression for passing real data through the critic
        self.real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        self.fake_out = lasagne.layers.get_output(critic,
                lasagne.layers.get_output(generator))


        # Create update expressions for training
        self.generator_params = lasagne.layers.get_all_params(generator, trainable=True)
        self.critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        self.generator = generator
        self.critic = critic
项目:luna16    作者:gzuidhof    | 项目源码 | 文件源码
def __init__(self):
        """Set up the U-Net: metrics, symbolic inputs, the network, and
        the compiled train/validation functions."""
        super(UNetTrainer, self).__init__(['Loss','L2','Accuracy','Dice'])

        # Image batch, integer segmentation targets and per-pixel weights.
        input_var = T.tensor4('inputs')
        target_var = T.tensor4('targets', dtype='int64')
        weight_var = T.tensor4('weights')

        logging.info("Defining network")
        net_dict = unet.define_network(input_var)
        self.network = net_dict['out']

        self.train_fn, self.val_fn, self.l_r = unet.define_updates(
            self.network, input_var, target_var, weight_var)
项目:LasagneNLP    作者:XuezheMax    | 项目源码 | 文件源码
def test():
    """Smoke-test crf_loss / crf_accuracy on a tiny hand-built batch."""
    energies_var = T.tensor4('energies', dtype=theano.config.floatX)
    targets_var = T.imatrix('targets')
    masks_var = T.matrix('masks', dtype=theano.config.floatX)
    layer_input = lasagne.layers.InputLayer([2, 2, 3, 3], input_var=energies_var)
    out = lasagne.layers.get_output(layer_input)
    loss = crf_loss(out, targets_var, masks_var)
    prediction, acc = crf_accuracy(energies_var, targets_var)

    fn = theano.function([energies_var, targets_var, masks_var], [loss, prediction, acc])

    # Two sequences of length 2; the second row of the second sample is
    # masked out below.
    energies = np.array([[[[10, 15, 20], [5, 10, 15], [3, 2, 0]], [[5, 10, 1], [5, 10, 1], [5, 10, 1]]],
                         [[[5, 6, 7], [2, 3, 4], [2, 1, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], dtype=np.float32)

    targets = np.array([[0, 1], [0, 2]], dtype=np.int32)

    masks = np.array([[1, 1], [1, 0]], dtype=np.float32)

    l, p, a = fn(energies, targets, masks)
    print l
    print p
    print a
项目:pyextremelm    作者:tobifinn    | 项目源码 | 文件源码
def _generate_conv(self, image_shape=None):
        """Compile self.conv_fct: conv2d forward pass with stored weights,
        optional bias, and the configured activation.

        :param image_shape: optional static input shape passed to conv2d
        """
        input = T.tensor4(name='input')
        W = theano.shared(np.asarray(self.weights['input'], dtype=input.dtype),
                          name='W')
        conv_out = T.nnet.conv2d(input, W,
                                 border_mode=self.pad,
                                 subsample=self.stride,
                                 filter_shape=self.filter_shape,
                                 input_shape=image_shape)
        if self.bias:
            # Broadcast the bias over batch and spatial dimensions.
            b = theano.shared(
                np.asarray(self.weights['bias'], dtype=input.dtype),
                name='b')
            conv_out = conv_out + b.dimshuffle('x', 0, 'x', 'x')
        if self.activation_fct is None:
            output = conv_out
        elif self.activation_fct == "hardlimit":
            # Binary step function.
            output = conv_out>0
        elif self.activation_fct == "hardtanh":
            # Clip linearly to [-1, 1].
            output = T.switch(conv_out > -1, T.switch(conv_out > 1, 1, conv_out), -1)
        else:
            output = self.activation_fct(conv_out)
        self.conv_fct = theano.function([input], output)
项目:pyextremelm    作者:tobifinn    | 项目源码 | 文件源码
def _generate_conv(self, image_shape=None):
        """Compile self.conv_fct: bias-free conv2d forward pass with the
        stored weights and the configured activation.

        :param image_shape: optional static input shape passed to conv2d
        """
        input = T.tensor4(name='input')
        W = theano.shared(np.asarray(self.weights['input'], dtype=input.dtype),
                          name='W')
        conv_out = T.nnet.conv2d(input, W,
                               border_mode=self.pad,
                               subsample=self.stride,
                               filter_shape=self.weights['input'].shape,
                               input_shape=image_shape)
        if self.activation_fct is None:
            output = conv_out
        elif self.activation_fct == "hardlimit":
            # Binary step function.
            output = conv_out>0
        elif self.activation_fct == "hardtanh":
            # Clip linearly to [-1, 1].
            output = T.switch(conv_out > -1, T.switch(conv_out > 1, 1, conv_out), -1)
        else:
            output = self.activation_fct(conv_out)
        self.conv_fct = theano.function([input], output)
项目:feature-extraction-for-signature-verification    作者:amitadate    | 项目源码 | 文件源码
def get_feature_vector(self, image, layer='fc2'):
        """Propagates forward until arriving at a desired layer.

        Parameters:
            image (numpy.ndarray): 2-D input image (H x W)
            layer (str): name of the layer whose output is returned
        """
        assert len(image.shape) == 2, "2 dimensional input: H x W"

        # Add batch and channel axes: (1, 1, H, W).
        batch = image[np.newaxis, np.newaxis]

        # Compile the forward function for this layer on first use, then
        # cache it for later calls.
        if layer not in self.forward_util_layer:
            inputs = T.tensor4('inputs')
            outputs = lasagne.layers.get_output(self.model[layer],
                                                inputs=inputs,
                                                deterministic=True)
            self.forward_util_layer[layer] = theano.function([inputs], outputs)

        # Run the cached forward pass.
        return self.forward_util_layer[layer](batch)
项目:deep-prior-pp    作者:moberweger    | 项目源码 | 文件源码
def __init__(self, rng, inputVar=None, cfgParams=None):
        """Construct the network.

        :param rng: numpy random state used for initialization
        :param inputVar: a theano variable, the name for a new tensor4,
            or None for a default tensor4 named 'x'
        :type cfgParams: DescriptorNetParams
        """
        import theano.tensor as T

        if cfgParams is None:
            raise Exception("Cannot create a Net without config parameters (ie. cfgParams==None)")

        # Accept a ready-made variable, a variable name, or nothing at all.
        if isinstance(inputVar, str):
            inputVar = T.tensor4(inputVar)  # input variable
        elif inputVar is None:
            inputVar = T.tensor4('x')  # input variable

        # delegate structure creation to the base class
        super(PoseRegNet, self).__init__(rng, inputVar, cfgParams)
项目:Cascade-CNN-Face-Detection    作者:gogolgrind    | 项目源码 | 文件源码
def __init__(self,nn_name,batch_size=1024,freeze=1,l_rates = sp.float32(0.05)*sp.ones(512,dtype=sp.float32),verbose = 1,subnet= None):
        """Build one of the cascade networks and its training functions.

        :param nn_name: which net to build: '12-net', '24-net', '48-net'
            or the corresponding '*-calib_net' variants
        :param batch_size: minibatch size
        :param freeze: if truthy, freeze the weights of `subnet`
        :param l_rates: per-epoch learning rates; its length sets max_epochs
        :param verbose: verbosity flag
        :param subnet: optional lower-resolution net this one builds upon
        """
        self.nn_name = nn_name
        self.subnet = subnet
        # Identity comparison against None instead of `!=`.
        if subnet is not None and freeze:
            self.subnet.__freeze__()
        self.batch_size = batch_size
        self.verbose = verbose
        self.l_rates = l_rates
        self.__input_var__ = T.tensor4('X'+self.nn_name[:2])
        # BUG FIX: was T.ivector('y+'+self.nn_name[:2]) which named the
        # variable 'y+12' etc.; the '+' belongs outside the quotes so the
        # name matches the 'X<id>' pattern of the input variable.
        self.__target_var__ = T.ivector('y'+self.nn_name[:2])
        self.max_epochs = self.l_rates.shape[0]
        # Dispatch table instead of a long elif chain.
        builders = {
            '12-net': self.__build_12_net__,
            '24-net': self.__build_24_net__,
            '48-net': self.__build_48_net__,
            '12-calib_net': self.__build_12_calib_net__,
            '24-calib_net': self.__build_24_calib_net__,
            '48-calib_net': self.__build_48_calib_net__,
        }
        if self.nn_name in builders:
            self.net = builders[self.nn_name]()
        self.__build_loss_train__fn__()
项目:DBQA-KBQA    作者:Lucien-qiang    | 项目源码 | 文件源码
def test_kmax_pool():
  """Smoke-test k_max_pooling vs max_pooling on a tiny shuffled input."""
  nbatches, nkernels_in, nwords, ndim = 2, 1, 5, 3
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 3
  f_kmax = theano.function([input], k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  # Distinct values 0..N-1 shuffled, so pooled maxima are unambiguous.
  image_data = np.arange(np.prod(input_shape), dtype=np.float64)
  np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  print image_data
  print 'kmax'
  print f_kmax(image_data)
  print 'max'
  print f_max(image_data)
项目:DBQA-KBQA    作者:Lucien-qiang    | 项目源码 | 文件源码
def test_kmax_pooling_time():
  """Benchmark argsort- and unroll-based k-max pooling vs max pooling."""
  nbatches, nkernels_in, nwords, ndim = 50, 16, 58, 300
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 1
  f_kmax_argsort = theano.function([input], k_max_pooling(input, k))
  f_kmax_unroll = theano.function([input], _k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  image_data = np.random.randn(*input_shape).astype(dtype=np.float64)
  # np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  # print image_data
  # print 'kmax'
  print 'f_kmax_argsort', timeit.timeit(lambda: f_kmax_argsort(image_data), number=10)
  print 'f_kmax_unroll', timeit.timeit(lambda: f_kmax_unroll(image_data), number=10)
  print 'f_max', timeit.timeit(lambda: f_max(image_data), number=10)
项目:kaggle_dsb    作者:syagev    | 项目源码 | 文件源码
def __init__(self):
        """Set up the U-Net: metrics, symbolic inputs, the network, and
        the compiled train/validation functions."""
        metric_names = ['Loss','L2','Accuracy','Dice']
        super(UNetTrainer, self).__init__(metric_names)

        # Image batch, integer segmentation targets and per-pixel weights.
        input_var = T.tensor4('inputs')
        target_var = T.tensor4('targets', dtype='int64')
        weight_var = T.tensor4('weights')


        logging.info("Defining network")
        net_dict = unet.define_network(input_var)
        self.network = net_dict['out']
        train_fn, val_fn, l_r = unet.define_updates(self.network, input_var, target_var, weight_var)

        self.train_fn = train_fn
        self.val_fn = val_fn
        self.l_r = l_r
项目:chordrec    作者:fdlm    | 项目源码 | 文件源码
def build_net(in_shape, out_size, model):
    """Stack a CRF layer on an input (+mask) layer.

    Returns (network, input_var, target_var, mask_var).
    """
    # Multi-dimensional feature inputs get a tensor4, flat sequences a
    # tensor3.
    if len(in_shape) > 1:
        input_var = tt.tensor4('input', dtype='float32')
    else:
        input_var = tt.tensor3('input', dtype='float32')
    target_var = tt.tensor3('target_output', dtype='float32')
    mask_var = tt.matrix('mask_input', dtype='float32')

    # Batch and sequence-length dimensions stay unspecified.
    net = lnn.layers.InputLayer(
        name='input', shape=(None, None) + in_shape,
        input_var=input_var
    )
    mask_in = lnn.layers.InputLayer(name='mask',
                                    input_var=mask_var,
                                    shape=(None, None))

    net = spg.layers.CrfLayer(
        net, mask_input=mask_in, num_states=out_size, name='CRF')

    return net, input_var, target_var, mask_var
项目:DEEP-CLICK-MODEL    作者:THUIR    | 项目源码 | 文件源码
def test_kmax_pool():
  """Smoke-test k_max_pooling vs max_pooling on a tiny shuffled input."""
  nbatches, nkernels_in, nwords, ndim = 2, 1, 5, 3
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 3
  f_kmax = theano.function([input], k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  # Distinct values 0..N-1 shuffled, so pooled maxima are unambiguous.
  image_data = np.arange(np.prod(input_shape), dtype=np.float64)
  np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  print image_data
  print 'kmax'
  print f_kmax(image_data)
  print 'max'
  print f_max(image_data)
项目:DEEP-CLICK-MODEL    作者:THUIR    | 项目源码 | 文件源码
def test_kmax_pooling_time():
  """Benchmark argsort- and unroll-based k-max pooling vs max pooling."""
  nbatches, nkernels_in, nwords, ndim = 50, 16, 58, 300
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 1
  f_kmax_argsort = theano.function([input], k_max_pooling(input, k))
  f_kmax_unroll = theano.function([input], _k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  image_data = np.random.randn(*input_shape).astype(dtype=np.float64)
  # np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  # print image_data
  # print 'kmax'
  print 'f_kmax_argsort', timeit.timeit(lambda: f_kmax_argsort(image_data), number=10)
  print 'f_kmax_unroll', timeit.timeit(lambda: f_kmax_unroll(image_data), number=10)
  print 'f_max', timeit.timeit(lambda: f_max(image_data), number=10)
项目:ObjRecPoseEst    作者:paroj    | 项目源码 | 文件源码
def setupVariables(self):
        """Create the symbolic variables used during training: learning
        hyper-parameters, the input batch, and the target / pair /
        triplet descriptors."""
        floatX = theano.config.floatX  # @UndefinedVariable


        # params
        self.learning_rate = T.scalar('learning_rate',dtype=floatX) 
        self.momentum = T.scalar('momentum',dtype=floatX)

        # input
        self.tvIndex = T.lscalar()  # index to a [mini]batch
        #self.tvIndex.tag.test_value = 10
        self.tvX = self.descrNet.inputVar

        # targets
        self.tvY = T.ivector('y')
        # NOTE(review): 'yr' presumably holds image-shaped regression
        # targets (it is a tensor4) -- confirm against the trainer.
        self.tvYr = T.tensor4('yr')
        self.tvPairIdx = T.imatrix('pairIdx')
        self.tvPairLabels = T.ivector('pairLabels')
        self.tvTripletIdx = T.imatrix('tripletIdx')
        self.tvTripletThresh = T.scalar('tripletThresh')
        self.tvTripletPoolIdx = T.imatrix('tripletPoolIdx')
        self.tvTripletPoolThresh = T.scalar('tripletPoolThresh')
        self.tvPosTripletPoolSize = T.iscalar('posTripletPoolSize')
        self.tvNegTripletPoolSize = T.iscalar('negTripletPoolSize')
项目:denet    作者:lachlants    | 项目源码 | 文件源码
def __init__(self):
        """Initialize an empty model: bookkeeping fields, a recorded RNG
        seed, training parameters and the symbolic input tensor."""
        super().__init__()

        self.batch_size = 0
        self.iteration = 0
        self.class_labels = None
        self.data_shape = None
        self.class_num = 0
        # Pick a random seed but keep it so runs can be reproduced.
        self.rng_seed = random.randint(1,9999)
        denet.layer.set_rng_seed(self.rng_seed)

        #training parameters
        self.gradient_clip = 0.0
        self.skip_layer_updates = []
        self.bias_decay = False
        self.layers=[]
        self.distort_mode = []
        self.func = {}  # compiled theano functions, keyed by name

        #input data
        self.input = tensor.tensor4("input")

    #input image shape
项目:denet    作者:lachlants    | 项目源码 | 文件源码
def test():
    """Self-test for BatchNormLayer: normalized output should have ~zero
    mean / unit std and the running statistics should track the input."""
    from denet.layer import InitialLayer
    numpy.random.seed(1002)
    eps = 1e-4

    input = tensor.tensor4()
    input_shape = (64,128,32,32)
    bn = BatchNormLayer([InitialLayer(input, input_shape)])
    # Run one forward pass in training mode so local_updates fire.
    f = theano.function([input], bn.output, updates=bn.local_updates, givens=[(get_train(), tensor.cast(1, 'int8'))])
    x = numpy.random.uniform(0.0, 1.0, input_shape).astype(numpy.float32)
    y = f(x)
    x_mean = bn.mean.get_value()
    x_std = bn.std.get_value()

    # Separate sanity check that a plain mean over the same data works.
    import theano
    import numpy
    input = theano.tensor.tensor4()
    f = theano.function([input], input.mean())
    x = numpy.random.uniform(0.0, 1.0, (64,128,32,32)).astype(numpy.float32)
    print("Mean TEST = ", f(x))


    # NOTE(review): 0.1 presumably is the running-average momentum and
    # 1.24641 the expected std estimate for this input -- confirm against
    # BatchNormLayer's implementation.
    if abs(y.mean()) > eps or abs(y.std() - 1.0) > eps or abs(x_mean.mean() - x.mean()*0.1) > eps or abs(x_std.mean() - 1.24641) > eps:
        raise Exception("Batchnorm failed test! ", y.mean(), y.std(), x_mean.mean(), x_std.mean())
项目:saliency-salgan-2017    作者:imatge-upc    | 项目源码 | 文件源码
def __init__(self, input_width, input_height, batch_size=32):
        """Store image geometry and create symbolic placeholders; the
        networks and training functions are assigned later.

        :param input_width: width of the input images
        :param input_height: height of the input images
        :param batch_size: minibatch size
        """
        self.inputWidth = input_width
        self.inputHeight = input_height

        # Learning rates and momentum, set during training setup.
        self.G_lr = None
        self.D_lr = None
        self.momentum = None

        # Generator network and discriminator, built elsewhere.
        self.net = None
        self.discriminator = None
        self.batch_size = batch_size

        # Compiled theano functions, assigned after the nets are built.
        self.D_trainFunction = None
        self.G_trainFunction = None
        self.predictFunction = None
        # Symbolic input batch and corresponding target batch.
        self.input_var = T.tensor4()
        self.output_var = T.tensor4()
项目:convnet-for-geometric-matching    作者:hjweide    | 项目源码 | 文件源码
def create_corr_func():
    """Build and smoke-test a correlation function between two batches of
    15x15 feature maps, scanning over the batch dimension."""
    import numpy as np
    Xa, Xb = T.tensor4('Xa'), T.tensor4('Xb')

    def correlation(A, B):
        # Flatten spatial dims, correlate every channel pair via a
        # tensor dot product, then restore the 15x15 layout.
        Ap, Bp = A.reshape((-1, 15 * 15)), B.reshape((-1, 15 * 15))
        C = T.tensordot(Ap.T, Bp, axes=1).reshape((-1, 15, 15))
        return C

    # scan applies `correlation` to each (sample_a, sample_b) pair.
    result, updates = theano.scan(fn=correlation,
                                  outputs_info=None,
                                  sequences=[Xa, Xb],
                                  non_sequences=None)
    corr_func = theano.function(
        inputs=[Xa, Xb],
        outputs=result,
    )

    X = np.random.random((32, 128, 15, 15)).astype(np.float32)
    Y = np.random.random(X.shape).astype(np.float32)

    output = corr_func(X, Y)
    print output.shape
项目:convnet-for-geometric-matching    作者:hjweide    | 项目源码 | 文件源码
def create_infer_func(layers):
    """Compile the inference function that predicts the transform Tp for
    a pair of input batches (Xa_batch, Xb_batch)."""
    Xa, Xb = T.tensor4('Xa'), T.tensor4('Xb')
    Xa_batch, Xb_batch = T.tensor4('Xa_batch'), T.tensor4('Xb_batch')

    # Deterministic forward pass through the transform head.
    transform = get_output(
        layers['trans'],
        inputs={layers['inputa']: Xa, layers['inputb']: Xb},
        deterministic=True,
    )

    # The batch placeholders are substituted for Xa/Xb at call time.
    return theano.function(
        inputs=[theano.In(Xa_batch), theano.In(Xb_batch)],
        outputs=transform,
        givens={Xa: Xa_batch, Xb: Xb_batch},  # Ia, Ib
    )
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_softmax_f16(self):
        """Compare the CPU softmax with GpuDnnSoftmax on float16 data."""
        x = T.matrix('x', 'float16')
        x_gpu = T.tensor4('x_gpu', 'float16')
        f_z = T.nnet.softmax_op
        f_gpu = dnn.GpuDnnSoftmax(
            'accurate',
            'channel'
        )

        def cmp(n, m, f, f_gpu):
            # The GPU op needs 4D input; add singleton spatial dims and
            # strip them from the result before comparing.
            data = numpy.random.random((n, m)).astype('float16')
            gdata = numpy.asarray(data)[:, :, None, None]

            out = f(data)
            gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
            utt.assert_allclose(out, gout)

        self._test_softmax(x, x_gpu, f_z, f_gpu, cmp)
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_max_pool_2d_2D_same_size(self):
        """max_pool_2d_same_size keeps only each patch's maximum (other
        positions become 0) and its gradient passes verify_grad."""
        rng = numpy.random.RandomState(utt.fetch_seed())
        test_input_array = numpy.array([[[
            [1., 2., 3., 4.],
            [5., 6., 7., 8.]
        ]]]).astype(theano.config.floatX)
        # Only the max of each 2x2 patch (6 and 8) survives.
        test_answer_array = numpy.array([[[
            [0., 0., 0., 0.],
            [0., 6., 0., 8.]
        ]]]).astype(theano.config.floatX)
        input = tensor.tensor4(name='input')
        patch_size = (2, 2)
        op = max_pool_2d_same_size(input, patch_size)
        op_output = function([input], op)(test_input_array)
        utt.assert_allclose(op_output, test_answer_array)

        def mp(input):
            return max_pool_2d_same_size(input, patch_size)
        utt.verify_grad(mp, [test_input_array], rng=rng)
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_local_flatten_lift():
    """The local_reshape_lift optimization should move flatten above the
    elemwise op, leaving one Reshape and a final Elemwise in the graph."""
    for i in xrange(1, 4):
        x = tensor.tensor4()
        out = tensor.flatten(T.exp(x), i)
        assert out.ndim == i
        mode = compile.mode.get_default_mode()
        mode = mode.including('local_reshape_lift')
        f = theano.function([x], out, mode=mode)
        x_np = numpy.random.rand(5, 4, 3, 2).astype(config.floatX)
        out_np = f(x_np)
        topo = f.maker.fgraph.toposort()
        # Expected output shape: leading dims kept, trailing dims merged.
        shape_out_np = tuple(x_np.shape[:i-1])+(numpy.prod(x_np.shape[i-1:]),)
        assert shape_out_np == out_np.shape

        reshape_nodes = [n for n in topo if isinstance(n.op, tensor.Reshape)]
        assert (len(reshape_nodes) == 1 and
            tensor.is_flat(reshape_nodes[0].outputs[0], outdim=i))
        assert isinstance(topo[-1].op, tensor.Elemwise)
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_batched_tensordot():
    """batched_tensordot contracts the given axes per batch element; check
    output shapes for both an explicit axes spec and an integer axes."""
    first = theano.tensor.tensor4("first")
    second = theano.tensor.tensor4("second")
    # Contract first's axes (1, 2) against second's axes (3, 1).
    axes = [[1, 2], [3, 1]]
    output = theano.tensor.basic.batched_tensordot(first, second, axes)
    first_val = numpy.random.rand(8, 10, 20, 3).astype(config.floatX)
    second_val = numpy.random.rand(8, 20, 5, 10).astype(config.floatX)
    result_fn = theano.function([first, second], output)
    result = result_fn(first_val, second_val)
    assert result.shape[0] == first_val.shape[0]
    assert result.shape[1] == first_val.shape[3]
    assert result.shape[2] == second_val.shape[2]

    # Integer axes=1: contract the last axis of the first argument with
    # the first non-batch axis of the second, yielding a vector per batch.
    first_mat = theano.tensor.dmatrix("first")
    second_mat = theano.tensor.dmatrix("second")
    axes = 1
    output = theano.tensor.basic.batched_tensordot(first_mat, second_mat, axes)
    first_mat_val = numpy.random.rand(10, 4).astype(config.floatX)
    second_mat_val = numpy.random.rand(10, 4).astype(config.floatX)
    result_fn = theano.function([first_mat, second_mat], output)
    result = result_fn(first_mat_val, second_mat_val)
    assert result.shape[0] == first_mat_val.shape[0]
    assert len(result.shape) == 1
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_tile_grad():
    """The gradient of sum(tile(x, reps)) w.r.t. x is prod(reps) at every
    position; check for vector, matrix, tensor3 and tensor4 inputs."""

    def grad_tile(x, reps, np_x):
        y = tile(x, reps)
        z = y.sum()
        g = theano.function([x], grad(z, x))
        grad_res = g(np_x)
        # The gradient should be the product of the tiling dimensions
        # (since the gradients are additive through the tiling operation)
        assert numpy.all(grad_res == numpy.prod(reps))

    rng = numpy.random.RandomState(utt.fetch_seed())

    # test vector
    grad_tile(vector('x'), [3], rng.randn(5).astype(config.floatX))
    # test matrix
    grad_tile(matrix('x'), [3, 4], rng.randn(2, 3).astype(config.floatX))
    # test tensor3
    grad_tile(tensor3('x'), [3, 4, 5],
              rng.randn(2, 4, 3).astype(config.floatX))
    # test tensor4
    grad_tile(tensor4('x'), [3, 4, 5, 6],
              rng.randn(2, 4, 3, 5).astype(config.floatX))
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_broadcast_grad():
    """Gradient through a full-mode conv2d with a broadcast-built Gaussian
    filter should be constructible (no exception)."""
    # rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    # x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    # sigma_data = 20
    window_radius = 3

    # Normalized 1-D Gaussian over [-radius, radius].
    filter_1d = T.arange(-window_radius, window_radius + 1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5 * filter_1d**2 / sigma ** 2)
    filter_1d = filter_1d / filter_1d.sum()

    # Broadcast into a (1, 1, 2r+1, 1) filter for conv2d.
    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    # Only verifies the gradient graph can be built.
    theano.grad(y.sum(), sigma)
项目:Theano-Deep-learning    作者:GeekLiB    | 项目源码 | 文件源码
def test_dtype_upcast(self):
        """
        Checks dtype upcast for CorrMM methods.

        For every CorrMM op and every pairing of float32/float64 operands,
        the compiled output dtype must match theano.scalar.upcast.
        """
        def make_random(shape, dtype='float64'):
            # Uniform values in [-1, 1) with the requested dtype.
            vals = numpy.asarray(numpy.random.rand(*shape), dtype=dtype)
            return vals * 2 - 1

        dtypes = ['float32', 'float64']
        # Each op is paired with input shapes it accepts.
        cases = zip(
            [corr.CorrMM, corr.CorrMM_gradWeights, corr.CorrMM_gradInputs],
            [[4, 5, 6, 3], [1, 5, 6, 3], [1, 5, 6, 3]],
            [[7, 5, 3, 2], [1, 5, 3, 1], [7, 1, 3, 1]])

        for op, a_shape, b_shape in cases:
            for a_dtype in dtypes:
                for b_dtype in dtypes:
                    expected_dtype = theano.scalar.upcast(a_dtype, b_dtype)
                    a_sym = T.tensor4(dtype=a_dtype)
                    b_sym = T.tensor4(dtype=b_dtype)
                    out_sym = op()(a_sym, b_sym)
                    fn = theano.function([a_sym, b_sym], out_sym,
                                         mode=self.mode)
                    a_val = make_random(a_shape, dtype=a_dtype)
                    b_val = make_random(b_shape, dtype=b_dtype)
                    assert_equals(fn(a_val, b_val).dtype, expected_dtype)
项目:pixel-rnn-lasagne    作者:taimir    | 项目源码 | 文件源码
def __init__(self, batch_size, image_shape, n_hidden):
        """
        :param batch_size: how many images to have in a single minibatch
        :param image_shape: (channels x height x width)
        :param n_hidden: number of hidden units in the MD-RNN
        """
        self.batch_size = batch_size
        # image_shape is channels-first, per the docstring above.
        self.input_channels = image_shape[0]
        self.h = n_hidden
        self.height = image_shape[1]
        self.width = image_shape[2]
        # One output feature map per hidden unit.
        self.out_channels = n_hidden
        self.n_colors = image_shape[0]
        # Symbolic minibatch placeholders: float image batch and integer
        # per-pixel labels.
        self.inputs = T.tensor4("images")
        self.labels = T.itensor4("labels")
        # Build the symbolic graph, then compile the train/predict functions.
        self.network, self.loss, self.output = self._define_network(self.inputs, self.labels)
        self._define_forward_passes(self.network, self.loss, self.output, self.inputs, self.labels)
项目:amdtk    作者:amdtkdev    | 项目源码 | 文件源码
def __init__(self, inputs, maxpool_height, maxpool_width):
        """Max-pooling layer over (maxpool_height, maxpool_width) windows."""
        # Adopt the caller's symbolic input (coerced to 4-d), or create a
        # fresh 4-d input variable when none is given.
        if inputs is not None:
            self.inputs = inputs.flatten(4)
        else:
            self.inputs = T.tensor4(dtype=theano.config.floatX)

        self.maxpool_height = maxpool_height
        self.maxpool_width = maxpool_width

        # ignore_border=True drops partial windows at the right/bottom edges.
        pool_shape = (maxpool_height, maxpool_width)
        self.outputs = pool.pool_2d(self.inputs, pool_shape,
                                    ignore_border=True)

        # Pooling has no learnable parameters.
        self.params = []


# Possible layer types.
项目:ActiveBoundary    作者:MiriamHu    | 项目源码 | 文件源码
def decode(self, z):
        """
        Decode latent vectors through the ALI decoder.

        :param z: of shape (n_dim,) or (n_dim, 1) or (n_samples, n_dim)
        :return: decoded output for each row of z
        """
        # Promote a 1-d vector to a single-row batch (a 0-d input raises
        # IndexError here, matching the original shape probe).
        if z.ndim < 2:
            z = z.reshape(1, z.shape[0])
        # A column vector is one sample laid out vertically: make it a row.
        if z.shape[1] == 1 and z.shape[0] > 1:
            z = np.transpose(z)
        # The decoder consumes 4-d input: (n_samples, n_dim, 1, 1).
        z = z.reshape(z.shape[0], z.shape[1], 1, 1)
        latent = tensor.tensor4('z')
        decode_fn = theano.function([latent], self.ali.decoder.apply(latent))
        return decode_fn(z)
项目:deep-coref    作者:clarkkev    | 项目源码 | 文件源码
def build(self):
        """Allocate convolution weights/bias and register regularizers."""
        stack_size = self.input_shape[1]  # number of input channels
        self.input = T.tensor4()
        # Filter bank laid out as (n_filters, n_channels, rows, cols).
        self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
        self.W = self.init(self.W_shape)
        self.b = shared_zeros((self.nb_filter,))
        self.params = [self.W, self.b]

        self.regularizers = []
        # Weight and bias regularizers each wrap their parameter before use;
        # registration order (W, then b) is significant for reproducibility.
        for regularizer, param in ((self.W_regularizer, self.W),
                                   (self.b_regularizer, self.b)):
            if regularizer:
                regularizer.set_param(param)
                self.regularizers.append(regularizer)

        # The activity regularizer watches the layer itself, not a parameter.
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        # Apply any externally supplied weights, then release the reference.
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
项目:RL4Data    作者:fyabc    | 项目源码 | 文件源码
def __init__(self,
                 n=None,
                 train_batch_size=None,
                 validate_batch_size=None):
        """Build the CIFAR network plus its train/validate functions."""
        super(CIFARModel, self).__init__(train_batch_size, validate_batch_size)

        # Fall back to the configured depth when n is unset (or falsy).
        if not n:
            n = ParamConfig['n']

        # Symbolic minibatch placeholders: image batches and integer labels.
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')

        # Shared variable so the schedule can adjust it without recompiling.
        self.learning_rate = theano.shared(lasagne.utils.floatX(ParamConfig['init_learning_rate']))

        self.network = self.build_cnn(self.input_var, n)
        message("number of parameters in model: %d" % lasagne.layers.count_params(self.network, trainable=True))

        # Snapshot of the freshly initialized weights, for later resets.
        self.saved_init_parameters_values = get_all_param_values(self.network, trainable=True)

        self.build_train_function()
        self.build_validate_function()
项目:RL4Data    作者:fyabc    | 项目源码 | 文件源码
def __init__(self,
                 train_batch_size=None,
                 valid_batch_size=None
                 ):
        """Build the vanilla CNN plus its train/validate functions."""
        super(VanillaCNNModel, self).__init__(train_batch_size, valid_batch_size)

        # Shared variable so the schedule can adjust it without recompiling.
        self.learning_rate = theano.shared(lasagne.utils.floatX(ParamConfig['init_learning_rate']))

        # Symbolic minibatch placeholders: image batches and integer labels.
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')

        self.network = self.build_cnn(self.input_var)
        message("number of parameters in model: %d" % lasagne.layers.count_params(self.network, trainable=True))

        # Snapshot of the freshly initialized weights, for later resets.
        self.saved_init_parameters_values = get_all_param_values(self.network, trainable=True)

        self.build_train_function()
        self.build_validate_function()
项目:RL4Data    作者:fyabc    | 项目源码 | 文件源码
def __init__(self,
                 n=None,
                 train_batch_size=None,
                 validate_batch_size=None):
        """Build the ResNet plus its train/validate functions."""
        super(ResNetTFModel, self).__init__(train_batch_size, validate_batch_size)

        # Fall back to the configured depth when n is unset (or falsy).
        if not n:
            n = ParamConfig['n']

        # Symbolic minibatch placeholders: image batches and integer labels.
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')

        # Shared variable so the schedule can adjust it without recompiling.
        self.learning_rate = theano.shared(lasagne.utils.floatX(ParamConfig['init_learning_rate']))

        self.network = self.build_cnn(self.input_var, n)
        message("number of parameters in model: %d" % lasagne.layers.count_params(self.network, trainable=True))

        # Snapshot of the freshly initialized weights, for later resets.
        self.saved_init_parameters_values = get_all_param_values(self.network, trainable=True)

        self.build_train_function()
        self.build_validate_function()
项目:deep-hashtagprediction    作者:jderiu    | 项目源码 | 文件源码
def test_kmax_pool():
  nbatches, nkernels_in, nwords, ndim = 2, 1, 5, 3
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 3
  f_kmax = theano.function([input], k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  image_data = np.arange(np.prod(input_shape), dtype=np.float64)
  np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  print image_data
  print 'kmax'
  print f_kmax(image_data)
  print 'max'
  print f_max(image_data)
项目:deep-hashtagprediction    作者:jderiu    | 项目源码 | 文件源码
def test_kmax_pooling_time():
  nbatches, nkernels_in, nwords, ndim = 50, 16, 58, 300
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 1
  f_kmax_argsort = theano.function([input], k_max_pooling(input, k))
  f_kmax_unroll = theano.function([input], _k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  image_data = np.random.randn(*input_shape).astype(dtype=np.float64)
  # np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  # print image_data
  # print 'kmax'
  print 'f_kmax_argsort', timeit.timeit(lambda: f_kmax_argsort(image_data), number=10)
  print 'f_kmax_unroll', timeit.timeit(lambda: f_kmax_unroll(image_data), number=10)
  print 'f_max', timeit.timeit(lambda: f_max(image_data), number=10)
项目:CQA-CNN    作者:3141bishwa    | 项目源码 | 文件源码
def test_kmax_pool():
  nbatches, nkernels_in, nwords, ndim = 2, 1, 5, 3
  input_shape = (nbatches, nkernels_in, nwords, ndim)

  input = T.tensor4('input')

  k = 3
  f_kmax = theano.function([input], k_max_pooling(input, k))
  f_max = theano.function([input], max_pooling(input))

  image_data = np.arange(np.prod(input_shape), dtype=np.float64)
  np.random.shuffle(image_data)
  image_data = image_data.reshape(input_shape)
  print image_data
  print 'kmax'
  print f_kmax(image_data)
  print 'max'
  print f_max(image_data)