Python torch.nn module: RNNCell() example source code

The following 13 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.RNNCell().
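Before the project examples, a minimal usage sketch (assuming a recent PyTorch; several snippets below predate 0.4 and still wrap tensors in the now-deprecated Variable). nn.RNNCell(input_size, hidden_size) processes one time step at a time: it maps an input of shape (batch, input_size) and a hidden state of shape (batch, hidden_size) to a new hidden state of the same shape, so sequences are handled by calling the cell in a loop.

import torch
import torch.nn as nn

cell = nn.RNNCell(input_size=10, hidden_size=20)
x = torch.randn(5, 3, 10)   # (seq_len, batch, input_size)
hx = torch.zeros(3, 20)     # (batch, hidden_size)
for t in range(x.size(0)):  # unroll manually, one step per call
    hx = cell(x[t], hx)
print(hx.shape)             # torch.Size([3, 20])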

Project: pytorch-dist    Author: apaszke    | Project source | File source
def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = Variable(torch.randn(3, 10))
                hx = Variable(torch.randn(3, 20))
                cell = module(10, 20, bias=bias)
                for i in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()
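This test (and the fork copies of it below) predates PyTorch 0.4, where Variable was merged into Tensor. An equivalent sketch on a current PyTorch would simply use tensors:

import torch
import torch.nn as nn

for module in (nn.RNNCell, nn.GRUCell):
    for bias in (True, False):
        x = torch.randn(3, 10)
        hx = torch.randn(3, 20, requires_grad=True)
        cell = module(10, 20, bias=bias)
        for _ in range(6):
            hx = cell(x, hx)
        hx.sum().backward()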
Project: pytorch    Author: tylergenter    | Project source | File source
def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = Variable(torch.randn(3, 10))
                hx = Variable(torch.randn(3, 20))
                cell = module(10, 20, bias=bias)
                for i in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()
Project: pytorch-coriander    Author: hughperkins    | Project source | File source
def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = Variable(torch.randn(3, 10))
                hx = Variable(torch.randn(3, 20))
                cell = module(10, 20, bias=bias)
                for i in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()
Project: pytorch    Author: ezyang    | Project source | File source
def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = Variable(torch.randn(3, 10))
                hx = Variable(torch.randn(3, 20))
                cell = module(10, 20, bias=bias)
                for i in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()
Project: RSAM    Author: renll    | Project source | File source
def __init__(self):
        super(Net, self).__init__()
        self.nhid = 256
        self.nhid0 = 256
        self.rnn0 = nn.LSTMCell(256, 256)
        # self.downsample = nn.Conv2d(3, 3, kernel_size=2, stride=2)
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, padding=2)
        self.conv11 = nn.Conv2d(16, 32, kernel_size=5, padding=2)
        self.convo1 = nn.Conv2d(32, 4, kernel_size=1)
        # self.conv12 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        # self.rnn0 = nn.RNNCell(1024, 1024, nonlinearity='relu')
        self.conv21 = nn.Conv2d(3, 8, kernel_size=5, padding=2)
        # self.convo1 = nn.Conv2d(32, 16, kernel_size=1)
        self.conv22 = nn.Conv2d(8, 16, kernel_size=5, padding=2)
        self.conv23 = nn.Conv2d(16, 16, kernel_size=3, padding=1)

        self.rnn1 = nn.LSTMCell(256, 256)
        # self.convo3 = nn.Conv2d(32, 16, kernel_size=1)
        self.BN1 = nn.BatchNorm2d(8)
        self.BN2 = nn.BatchNorm2d(16)

        self.BN22 = nn.BatchNorm2d(16)
        self.BN3 = nn.BatchNorm2d(32)
        self.BN02 = nn.BatchNorm2d(16)
        self.BN03 = nn.BatchNorm2d(32)
        self.BN4 = nn.BatchNorm2d(64)
        self.BN5 = nn.BatchNorm2d(128)
        self.BN6 = nn.BatchNorm1d(256)
        self.BN0 = nn.BatchNorm1d(1024)
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(2, 256)
        self.fc3 = nn.Linear(256, 10)
        self.fc4 = nn.Linear(192, 256)
        self.fc5 = nn.Linear(512, 256)
        # self.fc1 = nn.Linear(400, 120)
        # self.BN3 = nn.BatchNorm1d(120)
        # self.fc2 = nn.Linear(120, 84)
        # self.BN4 = nn.BatchNorm1d(84)
        # self.fc3 = nn.Linear(84, 10)
Project: NeuroNLP2    Author: XuezheMax    | Project source | File source
def __init__(self, *args, **kwargs):
        super(MaskedRNN, self).__init__(nn.RNNCell, *args, **kwargs)
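The excerpt shows only the subclass: the MaskedRNN base class (not reproduced here) receives the cell class itself and builds the network from it. A hypothetical minimal sketch of that cell-injection pattern, with every name other than nn.RNNCell assumed:

import torch
import torch.nn as nn

class MaskedRNNBase(nn.Module):
    # Hypothetical base class: takes a cell class (here nn.RNNCell),
    # instantiates it, and steps it over the sequence, freezing the
    # hidden state wherever the mask marks padding.
    def __init__(self, Cell, input_size, hidden_size):
        super(MaskedRNNBase, self).__init__()
        self.cell = Cell(input_size, hidden_size)

    def forward(self, x, hx, mask=None):
        # x: (seq_len, batch, input_size); mask: (seq_len, batch) of 0/1
        outputs = []
        for t in range(x.size(0)):
            new_hx = self.cell(x[t], hx)
            if mask is not None:
                m = mask[t].unsqueeze(1)        # (batch, 1)
                hx = new_hx * m + hx * (1 - m)  # keep old state at padding
            else:
                hx = new_hx
            outputs.append(hx)
        return torch.stack(outputs), hx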
Project: pytorch-dynamic-batching    Author: mrdrozdov-github    | Project source | File source
def __init__(self,
                 model_dim=None,
                 mlp_dim=None,
                 num_classes=None,
                 word_embedding_dim=None,
                 initial_embeddings=None,
                 **kwargs):
        super(Net, self).__init__()
        self.model_dim = model_dim
        self.initial_embeddings = initial_embeddings
        self.rnn = nn.RNNCell(word_embedding_dim, model_dim)
        self.l0 = nn.Linear(model_dim, mlp_dim)
        self.l1 = nn.Linear(mlp_dim, num_classes)
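The forward pass of this Net (and of the near-identical variants below) is not included in the excerpt. A plausible sketch of how the cell and the two linear layers would be used, assuming inputs of shape (seq_len, batch, word_embedding_dim):

import torch
import torch.nn.functional as F

def forward_sketch(net, embeddings):
    # embeddings: (seq_len, batch, word_embedding_dim) -- assumed layout
    hx = torch.zeros(embeddings.size(1), net.model_dim)
    for t in range(embeddings.size(0)):  # one RNNCell call per time step
        hx = net.rnn(embeddings[t], hx)
    hidden = F.relu(net.l0(hx))          # MLP over the final hidden state
    return net.l1(hidden)                # logits: (batch, num_classes)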
Project: pytorch-dynamic-batching    Author: mrdrozdov-github    | Project source | File source
def __init__(self,
                 model_dim=None,
                 mlp_dim=None,
                 num_classes=None,
                 word_embedding_dim=None,
                 initial_embeddings=None,
                 **kwargs):
        super(Net, self).__init__()
        self.word_embedding_dim = word_embedding_dim
        self.model_dim = model_dim
        self.initial_embeddings = initial_embeddings
        self.rnn = nn.RNNCell(word_embedding_dim, model_dim)
        self.l0 = nn.Linear(model_dim, mlp_dim)
        self.l1 = nn.Linear(mlp_dim, num_classes)
Project: pytorch-dynamic-batching    Author: mrdrozdov-github    | Project source | File source
def __init__(self,
                 model_dim=None,
                 mlp_dim=None,
                 num_classes=None,
                 word_embedding_dim=None,
                 initial_embeddings=None,
                 **kwargs):
        super(Net, self).__init__()
        self.model_dim = model_dim
        self.initial_embeddings = initial_embeddings
        self.rnn = nn.RNNCell(word_embedding_dim, model_dim)
        self.l0 = nn.Linear(model_dim, mlp_dim)
        self.l1 = nn.Linear(mlp_dim, num_classes)
Project: pytorch-dynamic-batching    Author: mrdrozdov-github    | Project source | File source
def __init__(self,
                 model_dim=None,
                 mlp_dim=None,
                 num_classes=None,
                 word_embedding_dim=None,
                 initial_embeddings=None,
                 **kwargs):
        super(Net, self).__init__()
        self.model_dim = model_dim
        self.initial_embeddings = initial_embeddings
        self.rnn = nn.RNNCell(word_embedding_dim, model_dim)
        self.l0 = nn.Linear(model_dim, mlp_dim)
        self.l1 = nn.Linear(mlp_dim, num_classes)
Project: pytorch    Author: pytorch    | Project source | File source
def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = Variable(torch.randn(3, 10))
                hx = Variable(torch.randn(3, 20))
                cell = module(10, 20, bias=bias)
                for i in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()
Project: pytorch    Author: pytorch    | Project source | File source
def test_RNN_cell_no_broadcasting(self):
        def test(cell_module, input, hx, input_size, hidden_size):
            cell = cell_module(input_size, hidden_size)
            self.assertRaises(RuntimeError, lambda: cell(input, hx))

        def test_all(hidden_size, bad_hx, good_hx, input_size, input):
            test(nn.RNNCell, input, bad_hx, input_size, hidden_size)
            test(nn.GRUCell, input, bad_hx, input_size, hidden_size)
            test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)
            test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)

        hidden_size = 20
        input_size = 10
        input = Variable(torch.randn(3, input_size))
        bad_hx = Variable(torch.randn(1, hidden_size))
        good_hx = Variable(torch.randn(3, hidden_size))

        # Test hidden/input batch size broadcasting
        test_all(hidden_size, bad_hx, good_hx, input_size, input)

        # Test hx's hidden_size vs module's hidden_size broadcasting
        bad_hx = Variable(torch.randn(3, 1))
        test_all(hidden_size, bad_hx, good_hx, input_size, input)

        # Test input's input_size vs module's input_size broadcasting
        bad_input = Variable(torch.randn(3, 1))
        test_all(hidden_size, good_hx, good_hx, input_size, bad_input)
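What the test pins down, in isolation: the cell modules validate shapes instead of silently broadcasting, so a batch-size mismatch between input and hidden state is rejected at call time. A sketch, assuming any reasonably recent PyTorch:

import torch
import torch.nn as nn

cell = nn.RNNCell(10, 20)
x = torch.randn(3, 10)       # batch size 3
bad_hx = torch.randn(1, 20)  # batch size 1: must not broadcast to 3

try:
    cell(x, bad_hx)
except RuntimeError as err:
    print("mismatch rejected:", err)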
Project: sequence_generation_pytorch    Author: osm3000    | Project source | File source
def __init__(self, cell_type="lstm", input_size=1, hidden_size=20, output_size=1, nonlinearity="tanh"):
        super(lstm_rnn_gru, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.nonlinearity = nonlinearity.lower()
        assert self.nonlinearity in ['tanh', 'relu']

        self.cell_type = cell_type.lower()
        if self.cell_type == "lstm":
            self.layer1 = nn.LSTMCell(input_size=self.input_size, hidden_size=self.hidden_size)
            self.layer2 = nn.LSTMCell(input_size=self.hidden_size, hidden_size=self.output_size)
        elif self.cell_type == "rnn":
            self.layer1 = nn.RNNCell(input_size=self.input_size, hidden_size=self.hidden_size, nonlinearity=self.nonlinearity)
            self.layer2 = nn.RNNCell(input_size=self.hidden_size, hidden_size=self.output_size, nonlinearity=self.nonlinearity)
        elif self.cell_type == "gru":
            self.layer1 = nn.GRUCell(input_size=self.input_size, hidden_size=self.hidden_size)
            self.layer2 = nn.GRUCell(input_size=self.hidden_size, hidden_size=self.output_size)
        else:
            raise ("Please enter a good cell type (LSTM/RNN/GRU)")

        self.layer1.weight_hh.data.normal_(0.0, 0.1)
        self.layer1.weight_ih.data.normal_(0.0, 0.1)
        self.layer2.weight_hh.data.normal_(0.0, 0.1)
        self.layer2.weight_ih.data.normal_(0.0, 0.1)

        # Should I do something about the biases here?
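        # One hedged answer to the question above: RNNCell, GRUCell, and
        # LSTMCell all expose bias_ih and bias_hh parameters (present
        # because bias=True is the default), so they can be initialized
        # in the same style as the weights, e.g. to zero:
        for layer in (self.layer1, self.layer2):
            layer.bias_ih.data.zero_()
            layer.bias_hh.data.zero_()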