Python theano.tensor.nnet module: softmax() usage examples

The following 7 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet.softmax().

Project: python-machine-learning    Author: sho-87
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)  # Predicted class
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
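For readers who want to run the layer computation end to end, here is a minimal self-contained sketch of the same forward pass; the shapes (n_in=3, n_out=4) and random values are illustrative assumptions, not taken from the project above.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

x = T.matrix('x')                                   # (mini_batch_size, n_in); shapes are assumed
w = theano.shared(np.random.randn(3, 4).astype(theano.config.floatX), name='w')
b = theano.shared(np.zeros(4, dtype=theano.config.floatX), name='b')

output = softmax(T.dot(x, w) + b)                   # each row sums to 1
y_out = T.argmax(output, axis=1)                    # predicted class per example

f = theano.function([x], [output, y_out])
probs, preds = f(np.random.randn(5, 3).astype(theano.config.floatX))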
Project: machine-deep_learning    Author: Charleswyt
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
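The snippets above call a dropout_layer helper that is not included in the excerpts. A common way to implement it in Theano (a sketch of the usual recipe, not necessarily these projects' exact code) is to sample a binary keep-mask:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

def dropout_layer(layer, p_dropout):
    # Illustrative sketch: zero out each unit with probability p_dropout
    srng = RandomStreams(np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX)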
Project: reinforcement-learning-policy-gradients    Author: DarkElement75
def set_inpt(self, inpt, mini_batch_size, timestep_n):

        # Reshape the 3D input to 2D so softmax can be applied row-wise
        self.inpt = inpt.reshape((mini_batch_size*timestep_n, self.n_in))

        # The affine transform w*x + b maps the 2D input to the output width n_out
        self.inpt = softmax(T.dot(self.inpt, self.w) + self.b)

        # Now that each row has the correct output width, reshape back to 3D,
        # using self.n_out for the last axis since this is the output dimension.
        self.output = self.inpt.reshape((mini_batch_size, timestep_n, self.n_out))
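theano.tensor.nnet.softmax normalizes each row of a 2D input, which is why the layer flattens the (batch, time) axes into one before applying it. Here is the pattern in isolation, as a sketch with assumed shapes:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

mini_batch_size, timestep_n, n_in, n_out = 2, 5, 3, 4   # assumed shapes
x = T.tensor3('x')                                      # (batch, time, n_in)
w = theano.shared(np.random.randn(n_in, n_out).astype(theano.config.floatX))
b = theano.shared(np.zeros(n_out, dtype=theano.config.floatX))

flat = x.reshape((mini_batch_size * timestep_n, n_in))  # 3D -> 2D
probs = softmax(T.dot(flat, w) + b)                     # each row sums to 1
out = probs.reshape((mini_batch_size, timestep_n, n_out))  # 2D -> 3D

f = theano.function([x], out)
y = f(np.random.randn(mini_batch_size, timestep_n, n_in).astype(theano.config.floatX))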
Project: reinforcement-learning-policy-gradients    Author: DarkElement75
def set_inpt(self, inpt, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax(T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
Project: neural-networks-and-deep-learning    Author: skylook
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
Project: Theano-Deep-learning    Author: GeekLiB
def build_prediction(self):
        # return NN.softmax(self.activation)  # use this line to expose a slow subtensor implementation
        return NN.sigmoid(self.activation)
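NN here is presumably an alias for theano.tensor.nnet. A small sketch (alias and shapes assumed) contrasting the two choices of prediction nonlinearity:

import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet as NN   # assumed alias, as the snippet implies

activation = T.matrix('activation')
predict = theano.function(
    [activation],
    [NN.sigmoid(activation),      # element-wise; rows need not sum to 1
     NN.softmax(activation)])     # row-wise probability distribution
sig, soft = predict(np.random.randn(2, 3).astype(theano.config.floatX))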
Project: DeepLearningPython35    Author: MichalDanielDobrzanski
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
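Note how these dropout layers pair two outputs: self.output scales the pre-activation by (1 - p_dropout) for evaluation, while self.output_dropout feeds the actually-dropped input during training. Scaling at test time matches the expected activation seen under dropout, as a quick numpy check illustrates (values are arbitrary):

import numpy as np

p_dropout = 0.5
acts = np.array([1.0, 2.0, 3.0])
masks = np.random.rand(100000, 3) > p_dropout   # keep each unit with prob 1 - p_dropout
print((masks * acts).mean(axis=0))              # empirical mean of dropped activations
print((1 - p_dropout) * acts)                   # matches the test-time scaling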