Python theano.tensor module: round() code examples

The following 39 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.round().
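Before the project excerpts, a minimal self-contained sketch of the basic call (assuming only a standard Theano install; the rounding mode is passed explicitly here because the excerpts below use both supported modes):

import theano
import theano.tensor as T

x = T.vector('x')
# element-wise rounding; mode is one of 'half_away_from_zero' or 'half_to_even'
f = theano.function([x], T.round(x, mode='half_away_from_zero'),
                    allow_input_downcast=True)
print(f([0.4, 0.5, -1.5, 2.6]))
# -> [ 0.  1. -2.  3.]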

Project: DeepLearaning_TrafficFlowPrediction    Author: KarisM    | project source | file source
def call(self, x, mask=None):
        if self.mode == 'maximum_likelihood':
            # draw maximum likelihood sample from Bernoulli distribution
            #    x* = argmax_x p(x) = 1         if p(x=1) >= 0.5
            #                         0         otherwise
            return T.round(x, mode='half_away_from_zero')
        elif self.mode == 'random':
            # draw random sample from Bernoulli distribution
            #    x* = x ~ p(x) = 1              if p(x=1) > uniform(0, 1)
            #                    0              otherwise
            return self.srng.binomial(size=x.shape, n=1, p=x, dtype=theano.config.floatX)
        elif self.mode == 'mean_field':
            # draw mean-field approximation sample from Bernoulli distribution
            #    x* = E[p(x)] = E[Bern(x; p)] = p
            return x
        elif self.mode == 'nrlu':
            return nrlu(x)
        else:
            raise NotImplementedError('Unknown sample mode!')
Project: DeepLearningTutorialForChinese    Author: zhaoyu611    | project source | file source
def get_pseudo_likehood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""
        # index of bit i in the expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')
        # binarize the input image by rounding to the nearest integer
        xi = T.round(self.input)
        # free energy for the given bit configuration
        fe_xi = self.free_energy(xi)
        # flip bit x_i of matrix xi, preserving all other bits x_{\i};
        # equivalent to xi[:, bit_i_idx] = 1 - xi[:, bit_i_idx], but assigns
        # the result to xi_flip instead of working in place on xi
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
        # free energy with the bit flipped
        fe_xi_flip = self.free_energy(xi_flip)
        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))
        # increment bit_i_idx modulo n_visible as part of the updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
        return cost
Project: sigma-delta    Author: petered    | project source | file source
def compute_activations(self, input_data, do_round = True):
        layer_input = input_data
        layer_signals = []
        for i, (w, b, k) in enumerate(zip(self.ws, self.bs, self.get_scales())):
            scaled_input = layer_input*k
            if not do_round:
                eta = None
                spikes = scaled_input
            else:
                eta = tt.round(scaled_input) - scaled_input
                spikes = scaled_input + disconnected_grad(eta)
            nonlinearity = get_named_activation_function(self.hidden_activations if i<len(self.ws)-1 else self.output_activation)
            output = nonlinearity((spikes/k).dot(w)+b)
            layer_signals.append({'input': layer_input, 'scaled_input': scaled_input, 'eta': eta, 'spikes': spikes, 'output': output})
            layer_input = output
        return layer_signals
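The line spikes = scaled_input + disconnected_grad(eta) above is a straight-through estimator: the forward value equals tt.round(scaled_input), but the gradient flows through as if rounding were the identity. A minimal sketch of the trick in isolation (standard Theano only):

import theano
import theano.tensor as tt
from theano.gradient import disconnected_grad

x = tt.vector('x')
eta = tt.round(x) - x              # rounding residual
y = x + disconnected_grad(eta)     # forward: round(x); backward: dy/dx = 1
g = theano.grad(y.sum(), x)
f = theano.function([x], [y, g], allow_input_downcast=True)
vals, grads = f([0.2, 0.7, 1.4])
# vals  -> [0., 1., 1.]; grads -> [1., 1., 1.]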
Project: sigma-delta    Author: petered    | project source | file source
def get_all_signals(self, input_):
        scale = self.get_scale()
        scaled_input = input_*scale

        inc_phi = self.phi + scaled_input
        epsilon = tt.round(inc_phi) - inc_phi
        spikes = inc_phi + epsilon
        # spikes = tt.round(inc_phi)
        new_phi = inc_phi-spikes

        output = spikes / scale
        signals = dict(
            input=input_,
            scaled_input=scaled_input,
            spikes=spikes,
            epsilon=epsilon,
            output=output,
            )
        add_update(self.phi, new_phi)
        return signals
Project: pdnn    Author: petered    | project source | file source
def herd(x, shape = None):
    phi = shared_like(x, name='phi') if shape is None else create_shared_variable(np.zeros(shape), name='phi{}'.format(shape))
    phi_ = phi + x
    s = tt.round(phi_)
    add_update(phi, phi_ - s)
    return s
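herd implements herding-style rounding: the shared residual phi carries the rounding error forward so the rounded stream matches the input on average (shared_like, create_shared_variable, and add_update are helpers from the surrounding project). The same idea in plain NumPy, as an illustration only:

import numpy as np

def herd_stream(xs):
    # carry the rounding residual forward across calls
    phi, out = 0.0, []
    for x in xs:
        phi += x
        s = float(np.round(phi))
        phi -= s
        out.append(s)
    return out

out = herd_stream([0.3] * 10)
# sum(out) == 3.0, so the rounded stream averages to the 0.3 input rate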
Project: RBM-DBN-theano-DL4J    Author: lzhbrian    | project source | file source
def get_pseudo_likelihood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the input image by rounding to nearest integer
        xi = T.round(self.input)

        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                            fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

        return cost
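The sigmoid in the cost comes from the two-state form of the conditional; written out (with FE the free energy and \tilde{x} the bit-flipped configuration):

P(x_i \mid x_{\setminus i}) = \frac{e^{-FE(x)}}{e^{-FE(x)} + e^{-FE(\tilde{x})}} = \sigma\!\left(FE(\tilde{x}) - FE(x)\right),
\qquad
\log \mathrm{PL}(x) \approx n_{\mathrm{visible}} \cdot \log \sigma\!\left(FE(\tilde{x}) - FE(x)\right)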
Project: keras    Author: GeekLiB    | project source | file source
def round(x):
    return T.round(x)
Project: DeepDTIs_DBN    Author: Bjoux2    | project source | file source
def get_pseudo_likelihood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the input image by rounding to nearest integer
        xi = T.round(self.input)

        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                            fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

        return cost
Project: DeepDTIs_DBN    Author: Bjoux2    | project source | file source
def get_pseudo_likelihood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the input image by rounding to nearest integer
        xi = T.round(self.input)

        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                            fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

        return cost
Project: yadll    Author: pchavanne    | project source | file source
def binary_accuracy(prediction, target):
    return T.mean(T.eq(prediction, T.round(target)))
Project: yadll    Author: pchavanne    | project source | file source
def binary_error(prediction, target):
    return T.mean(T.neq(prediction, T.round(target)))
Project: keraflow    Author: ipod825    | project source | file source
def round(self, x):
        return T.round(x)
Project: Vulcan    Author: rfratila    | project source | file source
def create_validator(self):
        """
        Generate theano function to check error and accuracy of the network.

        Returns: theano function that takes input (train_x,train_y)
                 and returns error and accuracy
        """
        print("Creating {} Validator...".format(self.name))
        # create prediction
        val_prediction = lasagne.layers.get_output(
            self.network,
            deterministic=True
        )
        # check how much error in prediction
        if self.val_cost is None:
            if self.num_classes is None or self.num_classes == 0:
                self.val_cost = self.mse_loss(val_prediction, self.y)
                val_acc = T.constant(0)
            else:
                self.val_cost = self.cross_entropy_loss(val_prediction, self.y)
                # check the accuracy of the prediction
                if self.num_classes > 1:
                    val_acc = T.mean(T.eq(T.argmax(val_prediction, axis=1),
                                     T.argmax(self.y, axis=1)),
                                     dtype=theano.config.floatX)
                elif self.num_classes == 1:
                    val_acc = T.mean(T.eq(T.round(val_prediction,
                                                  mode='half_away_from_zero'),
                                          self.y),
                                     dtype=theano.config.floatX)

        return theano.function([self.input_var, self.y],
                               [self.val_cost, val_acc])
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def round(x):
    return T.round(x, mode='half_to_even')
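The excerpts on this page disagree on tie-breaking: some call T.round(x) bare, others pass mode='half_to_even' (banker's rounding, matching NumPy) or mode='half_away_from_zero'. The two modes differ only at exact .5 ties:

import theano
import theano.tensor as T

x = T.vector('x')
f_away = theano.function([x], T.round(x, mode='half_away_from_zero'),
                         allow_input_downcast=True)
f_even = theano.function([x], T.round(x, mode='half_to_even'),
                         allow_input_downcast=True)
ties = [0.5, 1.5, 2.5, -0.5]
print(f_away(ties))  # [ 1.  2.  3. -1.]
print(f_even(ties))  # [ 0.  2.  2. -0.]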
Project: NNBuilder    Author: aeloyq    | project source | file source
def round(self, t):
        return T.round(t)
Project: cnn-bnn    Author: jpdz    | project source | file source
def binarization(W,H,binary=True,deterministic=False,stochastic=False,srng=None):

    # (deterministic == True) <-> test-time <-> inference-time
    if not binary or (deterministic and stochastic):
        # print("not binary")
        Wb = W

    else:

        # [-1,1] -> [0,1]
        Wb = hard_sigmoid(W/H)

        # Stochastic BinaryConnect
        if stochastic:

            # print("stoch")
            Wb = T.cast(srng.binomial(n=1, p=Wb, size=T.shape(Wb)), theano.config.floatX)

        # Deterministic BinaryConnect (round to nearest)
        else:
            # print("det")
            Wb = T.round(Wb)

        # 0 or 1 -> -1 or 1
        Wb = T.cast(T.switch(Wb,H,-H), theano.config.floatX)

    return Wb
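A short usage sketch of the deterministic branch, assuming BinaryConnect's hard_sigmoid(x) = clip((x + 1) / 2, 0, 1):

import theano
import theano.tensor as T

def hard_sigmoid(x):
    # assumed definition, as in the BinaryConnect reference code
    return T.clip((x + 1.) / 2., 0., 1.)

W = T.matrix('W')
H = 1.0
Wb = T.round(hard_sigmoid(W / H))                        # [-1,1] -> {0, 1}
Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)   # {0, 1} -> {-H, +H}
binarize = theano.function([W], Wb, allow_input_downcast=True)
print(binarize([[0.3, -0.7], [-0.1, 2.0]]))
# -> [[ 1. -1.]
#     [-1.  1.]]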

# This class extends the Lasagne DenseLayer to support BinaryConnect
Project: keras-customized    Author: ambrite    | project source | file source
def round(x):
    return T.round(x)
Project: theano-xnor-net    Author: gplhegde    | project source | file source
def binarize_conv_filters(W):
    """Binarize convolution weights and find the weight scaling factor
    W : theano tensor : convolution layer weight of dimension no_filters x no_feat_maps x h x w
    """
    # symbolic binary weight
    Wb = T.cast(T.switch(T.ge(W, 0),1,-1), theano.config.floatX)
    # BinaryNet method
    #Wb = T.cast(T.switch(T.round(hard_sigmoid(W)),1,-1), theano.config.floatX)

    # weight scaling factor
    # FIXME: directly compute the mean along axis 1,2,3 instead of reshaping    
    alpha = T.mean( T.reshape(T.abs_(W), (W.shape[0], W.shape[1]*W.shape[2]*W.shape[3])), axis=1)

    return Wb, alpha
Project: theano-xnor-net    Author: gplhegde    | project source | file source
def binarize_fc_weights(W):
    # symbolic binary weight
    Wb = T.cast(T.switch(T.ge(W, 0),1,-1), theano.config.floatX)
    # BinaryNet method
    #Wb = T.cast(T.switch(T.round(hard_sigmoid(W)),1,-1), theano.config.floatX)

    alpha = T.mean(T.abs_(W), axis=0)
    return Wb, alpha
Project: theano-xnor-net    Author: gplhegde    | project source | file source
def to_fixed_point_theano(input, no_bits, no_int_bits):
    scale = T.cast(2.**(no_bits - no_int_bits), theano.config.floatX)
    max_val = T.cast((2.**no_bits) - 1, theano.config.floatX)
    scaled = input * scale
    scaled = T.round(scaled)
    scaled = T.clip(scaled, -max_val, max_val)
    return scaled/scale
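A usage sketch of the function above: with no_bits=8 and no_int_bits=3 the quantization step is 2**-5 = 0.03125, and out-of-range values saturate via T.clip:

import theano
import theano.tensor as T

x = T.vector('x')
quantize = theano.function([x], to_fixed_point_theano(x, 8, 3),
                           allow_input_downcast=True)
print(quantize([0.1, 1.234, 9.0]))
# -> [0.09375, 1.21875, 7.96875]: each a multiple of 2**-5, 9.0 saturated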
Project: theano-xnor-net    Author: gplhegde    | project source | file source
def fixed_point(array, no_mag_bits, no_int_bits):
    """Convert to fixed point and convert it back to float
    """
    factor = 2.0 ** (no_mag_bits - no_int_bits)
    max_val = 2. ** no_mag_bits - 1
    scaled_arr = array * factor
    # round to the nearest value
    scaled_arr = np.around(scaled_arr)
    # saturation
    scaled_arr = np.clip(scaled_arr, -max_val, max_val)
    return scaled_arr/factor
Project: reading-text-in-the-wild    Author: mathDR    | project source | file source
def round(x):
    return T.round(x)
Project: pklGzMakerForTheano    Author: indra622    | project source | file source
def get_pseudo_likelihood_cost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the input image by rounding to nearest integer
        xi = T.round(self.input)

        # calculate free energy for the given bit configuration
        fe_xi = self.free_energy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.free_energy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
                                                            fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible

        return cost
Project: keras    Author: NVIDIA    | project source | file source
def round(x):
    return T.round(x, mode='half_to_even')
Project: keras_superpixel_pooling    Author: parag2489    | project source | file source
def round(x):
    return T.round(x, mode='half_to_even')
Project: weather-modelling    Author: flipdazed    | project source | file source
def getPseudoLikelihoodCost(self, updates):
        """Stochastic approximation to the pseudo-likelihood"""

        # index of bit i in expression p(x_i | x_{\i})
        bit_i_idx = theano.shared(value=0, name='bit_i_idx')

        # binarize the inputs image by rounding to nearest integer
        xi = T.round(self.inputs)

        # calculate free energy for the given bit configuration
        fe_xi = self.freeEnergy(xi)

        # flip bit x_i of matrix xi and preserve all other bits x_{\i}
        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
        # the result to xi_flip, instead of working in place on xi.
        xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])

        # calculate free energy with bit flipped
        fe_xi_flip = self.freeEnergy(xi_flip)

        # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
        cost = T.mean(self.n_visible * T.log(activation(fe_xi_flip - fe_xi)))

        # increment bit_i_idx % number as part of updates
        updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
        return cost
Project: InnerOuterRNN    Author: Chemoinformatics    | project source | file source
def round(x):
    return T.round(x)
Project: sigma-delta    Author: petered    | project source | file source
def __call__(self, inputs):
        if self.scale != 1:
            import theano
            inputs = inputs * np.array(self.scale, dtype=theano.config.floatX)
        inc_phi = self.phi + inputs
        spikes = tt.round(inc_phi)
        new_phi = inc_phi-spikes
        add_update(self.phi, new_phi)
        return spikes
Project: odin_old    Author: trungnt13    | project source | file source
def round(x):
    return T.round(x)
Project: keras-recommendation    Author: sonyisme    | project source | file source
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
        self.optimizer = optimizers.get(optimizer)
        self.loss = weighted_objective(objectives.get(loss))

        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)

        self.y_train = self.get_output(train=True)
        self.y_test = self.get_output(train=False)

        # target of model
        self.y = T.zeros_like(self.y_train)

        self.weights = T.ones_like(self.y_train)

        train_loss = self.loss(self.y, self.y_train, self.weights)
        test_loss = self.loss(self.y, self.y_test, self.weights)

        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'
        self.y.name = 'y'

        if class_mode == "categorical":
            train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
            test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))

        elif class_mode == "binary":
            train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
            test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
        else:
            raise Exception("Invalid class mode:" + str(class_mode))
        self.class_mode = class_mode

        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

        if type(self.X_train) == list:
            train_ins = self.X_train + [self.y, self.weights]
            test_ins = self.X_test + [self.y, self.weights]
            predict_ins = self.X_test
        else:
            train_ins = [self.X_train, self.y, self.weights]
            test_ins = [self.X_test, self.y, self.weights]
            predict_ins = [self.X_test]

        self._train = theano.function(train_ins, train_loss,
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._predict = theano.function(predict_ins, self.y_test,
            allow_input_downcast=True, mode=theano_mode)
        self._test = theano.function(test_ins, test_loss,
            allow_input_downcast=True, mode=theano_mode)
        self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
            allow_input_downcast=True, mode=theano_mode)
Project: KEHNN    Author: MarkWuNLP    | project source | file source
def __init__(self, rng, linp, rinp, n_in, n_out, W=None, b=None):
        """ Initialize the parameters of the logistic regression

        :type linp: theano.tensor.TensorType
        :param linp: symbolic variable that describes the left input of the
        architecture (one minibatch)

        :type rinp: theano.tensor.TensorType
        :param rinp: symbolic variable that describes the right input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of left input units

        :type n_out: int
        :param n_out: number of right input units

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            if n_in == n_out:
                self.W = theano.shared(ortho_weight(n_in),borrow=True)
            else:
                W_bound = numpy.sqrt(6. / (n_in + n_out))
                self.W = theano.shared(numpy.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=(n_in, n_out)),
                    dtype=theano.config.floatX), borrow=True)
        else:
            self.W = W

        if b is None:
            self.b = theano.shared(value=0., name='b')
            self.b = theano.tensor.addbroadcast(self.b)
           #self.b = theano.tensor.set_subtensor(self.b,0.)
        else:
            self.b = b

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.sigmoid(T.batched_dot(T.dot(linp, self.W), rinp)+ self.b)
        self.predict_y = T.round(self.p_y_given_x)

        # parameters of the model
        self.params = [self.W, self.b]
Project: VideoGAN    Author: amartya18x    | project source | file source
def ready(self):
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                outputs=T.round(self.rnn.p_y_given_x),
                                mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                        outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
Project: single-cell-classification    Author: whuTommy    | project source | file source
def ready(self):
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
Project: single-cell-classification    Author: whuTommy    | project source | file source
def ready(self):
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
Project: single-cell-classification    Author: whuTommy    | project source | file source
def ready(self):
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
Project: sigma-delta    Author: petered    | project source | file source
def get_all_signals(self, input_, corruption_type = 'round', rng = None):
        scale = self.get_scale()
        scaled_input = input_*scale
        if corruption_type == 'round':
            epsilon = tt.round(scaled_input) - scaled_input
        elif corruption_type == 'randround':
            rng = get_theano_rng(rng)
            epsilon = tt.where(rng.uniform(scaled_input.shape)>(scaled_input % 1), tt.floor(scaled_input), tt.ceil(scaled_input))-scaled_input
            print('STOCH ROUNDING')
        elif corruption_type == 'rand':
            rng = get_theano_rng(1234)
            epsilon = rng.uniform(scaled_input.shape)-.5
        else:
            raise Exception('Unknown corruption_type: {}'.format(corruption_type))
        spikes = scaled_input + epsilon
        output = spikes / scale
        signals = dict(
            input=input_,
            scaled_input=scaled_input,
            spikes=spikes,
            epsilon=epsilon,
            output=output,
            )
        return signals
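The 'randround' branch is stochastic rounding: the value rounds up with probability equal to its fractional part, which makes the rounding unbiased (E[round(x)] = x). A NumPy sketch of that property:

import numpy as np

def stoch_round(x, rng):
    # round down if the draw exceeds the fractional part, else round up
    frac = x % 1.0
    return np.where(rng.uniform(size=np.shape(x)) > frac,
                    np.floor(x), np.ceil(x))

rng = np.random.RandomState(1234)
samples = [stoch_round(0.7, rng) for _ in range(100000)]
print(np.mean(samples))  # ~0.7: unbiased in expectation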

Project: MultiTurnResponseSelection    Author: MarkWuNLP    | project source | file source
def __init__(self, rng, linp, rinp, n_in, n_out, W=None, b=None):
        """ Initialize the parameters of the logistic regression

        :type linp: theano.tensor.TensorType
        :param linp: symbolic variable that describes the left input of the
        architecture (one minibatch)

        :type rinp: theano.tensor.TensorType
        :param rinp: symbolic variable that describes the right input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of left input units

        :type n_out: int
        :param n_out: number of right input units

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            if n_in == n_out:
                self.W = theano.shared(ortho_weight(n_in),borrow=True)
            else:
                W_bound = numpy.sqrt(6. / (n_in + n_out))
                self.W = theano.shared(numpy.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=(n_in, n_out)),
                    dtype=theano.config.floatX), borrow=True)
        else:
            self.W = W

        if b is None:
            self.b = theano.shared(value=0., name='b')
            self.b = theano.tensor.addbroadcast(self.b)
           #self.b = theano.tensor.set_subtensor(self.b,0.)
        else:
            self.b = b

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.sigmoid(T.batched_dot(T.dot(linp, self.W), rinp)+ self.b)
        self.predict_y = T.round(self.p_y_given_x)

        # parameters of the model
        self.params = [self.W, self.b]
Project: RecommendationSystem    Author: TURuibo    | project source | file source
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
        self.optimizer = optimizers.get(optimizer)
        self.loss = weighted_objective(objectives.get(loss))

        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)

        self.y_train = self.get_output(train=True)
        self.y_test = self.get_output(train=False)

        # target of model
        self.y = T.zeros_like(self.y_train)

        self.weights = T.ones_like(self.y_train)

        train_loss = self.loss(self.y, self.y_train, self.weights)
        test_loss = self.loss(self.y, self.y_test, self.weights)

        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'
        self.y.name = 'y'

        if class_mode == "categorical":
            train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
            test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))

        elif class_mode == "binary":
            train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
            test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
        else:
            raise Exception("Invalid class mode:" + str(class_mode))
        self.class_mode = class_mode

        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

        if type(self.X_train) == list:
            train_ins = self.X_train + [self.y, self.weights]
            test_ins = self.X_test + [self.y, self.weights]
            predict_ins = self.X_test
        else:
            train_ins = [self.X_train, self.y, self.weights]
            test_ins = [self.X_test, self.y, self.weights]
            predict_ins = [self.X_test]

        self._train = theano.function(train_ins, train_loss,
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._predict = theano.function(predict_ins, self.y_test,
            allow_input_downcast=True, mode=theano_mode)
        self._test = theano.function(test_ins, test_loss,
            allow_input_downcast=True, mode=theano_mode)
        self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
            allow_input_downcast=True, mode=theano_mode)
Project: TACNTN    Author: MarkWuNLP    | project source | file source
def __init__(self, rng, linp, rinp, n_in, n_out, W=None, b=None):
        """ Initialize the parameters of the logistic regression

        :type linp: theano.tensor.TensorType
        :param linp: symbolic variable that describes the left input of the
        architecture (one minibatch)

        :type rinp: theano.tensor.TensorType
        :param rinp: symbolic variable that describes the right input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of left input units

        :type n_out: int
        :param n_out: number of right input units

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            if n_in == n_out:
                self.W = theano.shared(ortho_weight(n_in),borrow=True)
            else:
                W_bound = numpy.sqrt(6. / (n_in + n_out))
                self.W = theano.shared(numpy.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=(n_in, n_out)),
                    dtype=theano.config.floatX), borrow=True)
        else:
            self.W = W

        if b is None:
            self.b = theano.shared(value=0., name='b')
            self.b = theano.tensor.addbroadcast(self.b)
           #self.b = theano.tensor.set_subtensor(self.b,0.)
        else:
            self.b = b

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.sigmoid(T.batched_dot(T.dot(linp, self.W), rinp)+ self.b)
        self.predict_y = T.round(self.p_y_given_x)

        # parameters of the model
        self.params = [self.W, self.b]