Python theano.tensor module: ftensor4() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.ftensor4().
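
Before the examples, here is a minimal sketch of what ftensor4() provides (an illustrative addition of ours, not taken from any of the projects below; it assumes only Theano and NumPy are installed): it declares a symbolic 4-D tensor of dtype float32, conventionally laid out as (batch, channels, rows, columns) for image data.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic 4-D float32 tensor; 'x' is just a debugging name.
x = T.ftensor4('x')

# Build a small graph and compile it: average over all non-batch axes.
y = x.mean(axis=(1, 2, 3))
f = theano.function([x], y)

# Call the compiled function with a concrete rank-4 float32 array.
batch = np.random.rand(2, 3, 4, 5).astype('float32')
print(f(batch).shape)  # -> (2,)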

Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet50(input_shape=(None, 3, 224, 224))

        if self.verbose:
            print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1 - categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1 - categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_vgg16(input_shape=(None, 3, 224, 224), verbose=self.verbose)
        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1 - categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1 - categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):

        import theano.tensor as T
        self.x = T.ftensor4('x')
        self.y = T.lvector('y')
        self.lr = T.scalar('lr')

        net = build_model_resnet152(input_shape=(None, 3, 224, 224))

        self.output_layer = net['prob']

        from lasagne.layers import get_output
        self.output = get_output(self.output_layer, self.x, deterministic=False)
        self.cost = lasagne.objectives.categorical_crossentropy(self.output, self.y).mean()
        from lasagne.objectives import categorical_accuracy
        self.error = 1 - categorical_accuracy(self.output, self.y, top_k=1).mean()
        self.error_top_5 = 1 - categorical_accuracy(self.output, self.y, top_k=5).mean()
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_zero_div():
    """Tests 0/x -> 0"""
    mode = theano.compile.mode.get_default_mode().including("local_zero_div")
    for t in (T.scalar, T.ivector, T.ftensor4):
        x = t('x')
        for op in (T.int_div, T.true_div):
            y = op(0, x)
            g = optimize(FunctionGraph([x], [y]))
            # the division should be gone
            divs = [node for node in g.toposort()
                    if isinstance(node.op, T.elemwise.Elemwise) and
                    isinstance(node.op.scalar_op, type(op.scalar_op))]
            assert len(divs) == 0
            # the output type should match the unoptimized one
            output = g.outputs[0]
            assert output.ndim == y.ndim
            assert output.type == y.type
            # and the output should be zero
            assert theano.tensor.get_scalar_constant_value(output) == 0
Project: Theano-Deep-learning | Author: GeekLiB
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv])
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv_inplace])
Project: Theano-Deep-learning | Author: GeekLiB
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer)
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer_inplace)
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparseblockdot(self):
        """
        Compares the numpy version of sparseblockgemv to sparse_block_dot.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = sparse_block_dot(W, h, iIdx, b, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparseblockgemv(self):
        """
        Compares the numpy and theano versions of sparseblockgemv.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparseblockgemv_grad_shape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
        go = theano.grad(o.sum(), [b, W, h])

        f = theano.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        # just make sure that it runs correctly and all the shapes are ok.
        b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        assert b_g.shape == b_val.shape
        assert h_g.shape == h_val.shape
        assert W_g.shape == W_val.shape
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparseblockouter(self):
        o = tensor.ftensor4()
        x = tensor.ftensor3()
        y = tensor.ftensor3()
        xIdx = tensor.imatrix()
        yIdx = tensor.imatrix()

        out = self.outer_op(o, x, y, xIdx, yIdx)

        f = theano.function([o, x, y, xIdx, yIdx], out,
                            on_unused_input="warn", mode=self.mode)

        o_val, x_val, y_val, xIdx_val, yIdx_val = \
            BlockSparse_Gemv_and_Outer.outer_data()

        th_out = f(o_val, x_val, y_val, xIdx_val, yIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.outer_numpy(
            o_val, x_val, y_val, xIdx_val, yIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_default_conv():
    """Just test that we introduce the right GPU convolution
    version.

    """
    img = theano.tensor.ftensor4()
    fil = theano.tensor.ftensor4()

    c = theano.tensor.nnet.conv2d(img, fil)
    f = theano.function([img, fil], c, mode=theano_mode)

    if cuda.dnn.dnn_available():
        assert any([isinstance(a.op, GpuDnnConv)
                    for a in f.maker.fgraph.apply_nodes])
    else:
        assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
                    for a in f.maker.fgraph.apply_nodes])
Project: Theano-Deep-learning | Author: GeekLiB
def test_logical_shapes(self):
        # Logical shapes are not supported anymore, so we check that it
        # raises an Exception.
        for stride in range(1, 4):
            kshp = (10, 2, 10, 10)
            featshp = (3, 10, 11, 11)

            a = tensor.ftensor4()
            A = tensor.ftensor4()

            # Need to transpose first two dimensions of kernel, and reverse
            # index kernel image dims (for correlation)
            kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

            featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                               featshp[3] * stride)
            kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
            self.assertRaises(ValueError, tensor.nnet.conv2d,
                              a, kernel_rotated,
                              border_mode='full',
                              image_shape=featshp,
                              filter_shape=kshp_rotated,
                              imshp_logical=featshp_logical[1:],
                              kshp_logical=kshp[2:])
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_conv_merge_mouts():
    # make sure it doesn't attempt to output/alpha merge a convolution
    # that has multiple clients.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    out = T.ftensor4()

    conv = dnn.dnn_conv(img, kern)

    lr = numpy.asarray(0.05, dtype='float32')

    if cuda.dnn.version() == -1:
        # Can't merge alpha with cudnn v1
        fr = conv + out
    else:
        fr = lr * (conv + out)
    rr = conv * lr

    f = theano.function([img, kern, out], [fr, rr], mode=mode_with_gpu)
    convs = [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_conv_merge_broad():
    # Make sure that we don't apply output_merge on broadcasted values.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()

    conv = dnn.dnn_conv(img, kern)

    lr = numpy.asarray(0.05, dtype='float32')

    # this does broadcasting
    fr = conv + lr

    f = theano.function([img, kern], [fr])
    convs = [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 1
    conv = convs[0]
    # Assert output was not merged
    assert isinstance(conv.inputs[2].owner.op, GpuAllocEmpty)
Project: Synkhronos | Author: astooke
def build_train_func(rank=0, **kwargs):
    print("rank: {} Building model".format(rank))
    resnet = build_resnet()

    print("Building training function")
    x = T.ftensor4('x')
    y = T.imatrix('y')

    prob = L.get_output(resnet['prob'], x, deterministic=False)
    loss = T.nnet.categorical_crossentropy(prob, y.flatten()).mean()
    params = L.get_all_params(resnet.values(), trainable=True)

    sgd_updates = updates.sgd(loss, params, learning_rate=1e-4)

    # make a function to compute and store the raw gradient
    f_train = theano.function(inputs=[x, y],
                              outputs=loss,  # (assumes this is an avg)
                              updates=sgd_updates)

    return f_train, "original"
Project: cbof | Author: passalis
def __init__(self, pooling='spp', spatial_level=1, n_codewords=64, learning_rate=0.001):
        self.initializers = []

        input_var = T.ftensor4('input_var')
        target_var = T.ivector('targets')

        network = lasagne.layers.InputLayer(shape=(None, 1, None, None), input_var=input_var)
        network = lasagne.layers.Conv2DLayer(network, num_filters=32, filter_size=(5, 5),
                                             nonlinearity=lasagne.nonlinearities.rectify)
        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
        network = lasagne.layers.Conv2DLayer(network, num_filters=64, filter_size=(5, 5),
                                             nonlinearity=lasagne.nonlinearities.rectify)
        if pooling == 'spp':
            network = lasagne.layers.SpatialPyramidPoolingLayer(network, pool_dims=[1, 2])
        elif pooling == 'bof':
            network = CBoF_Layer(network, input_var=input_var, initializers=self.initializers, n_codewords=n_codewords,
                                 spatial_level=spatial_level)

        network = lasagne.layers.dropout(network, p=.5)
        network = lasagne.layers.DenseLayer(network, num_units=1000, nonlinearity=lasagne.nonlinearities.elu)
        network = lasagne.layers.dropout(network, p=.5)
        network = lasagne.layers.DenseLayer(network, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
        self.network = network

        train_prediction = lasagne.layers.get_output(network, deterministic=False)
        test_prediction = lasagne.layers.get_output(network, deterministic=True)
        loss = lasagne.objectives.categorical_crossentropy(train_prediction, target_var).mean()

        self.params = lasagne.layers.get_all_params(network, trainable=True)
        updates = lasagne.updates.adam(loss, self.params, learning_rate=learning_rate)

        self.train_fn = theano.function([input_var, target_var], loss, updates=updates)
        self.test_fn = theano.function([input_var], T.argmax(test_prediction, axis=1))

        print("Model Compiled!")
Project: crossingNet | Author: melonwan
def linkSubNets(self, noiseInputVar=None):
        # for every subnet, the input is None
        if noiseInputVar is None:
            noiseInputVar = T.fmatrix('noise_input')

        self.noise_input_var = noiseInputVar
        self.depth_input_var = T.ftensor4('real_depth')
        self.gen_depth_var = lasagne.layers.get_output(self.gen_depth_layer,
                                                       self.noise_input_var,
                                                       deterministic=False)
        self.gen_depth_tvar = lasagne.layers.get_output(self.gen_depth_layer,
                                                        self.noise_input_var,
                                                        deterministic=True)
        real_var = self.depth_input_var
        fake_var = self.gen_depth_var

        self.real_feamat_var = T.mean(
            lasagne.layers.get_output(self.feamat_layer, real_var), axis=0)
        self.fake_feamat_var = T.mean(
            lasagne.layers.get_output(self.feamat_layer, fake_var), axis=0)
        self.px_real_var = lasagne.layers.get_output(self.dis_px_layer, real_var)
        self.px_fake_var = lasagne.layers.get_output(self.dis_px_layer, fake_var)
Project: DeepMonster | Author: olimastro
def run(self):
        print("Starting tests...")
        print()
        for feedforward, test_info in self.dict_of_test.items():
            if len(test_info[0]) == 5:
                dtensor5 = T.TensorType('float32', (False,) * 5)
                x = dtensor5('x')
            elif len(test_info[0]) == 4:
                x = T.ftensor4('x')
            elif len(test_info[0]) == 3:
                x = T.ftensor3('x')
            elif len(test_info[0]) == 2:
                x = T.fmatrix('x')

            print("Testing " + feedforward.prefix)

            out = feedforward.fprop(x)
            f = theano.function([x], out)
            npx = np.random.random(test_info[0]).astype(np.float32)
            if self.mode == 'no_crash':
                try:
                    out_shape = f(npx).shape
                    print(out_shape)
                except Exception:
                    print("Error encountered in this network")
            else:
                out_shape = f(npx).shape
                print(out_shape)

            print()
        print("Finished")
        print()
Project: WebNav | Author: nyu-dl
def make_node(self, x, x2, x3, x4, x5):
        # Check that this version of Theano has support for __props__.
        # The next line looks like it has a typo, but checking for the
        # '_props' attribute is actually how we detect that the Theano
        # version is recent enough to support the use of __props__.
        assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
        x = tensor.as_tensor_variable(x)
        x2 = tensor.as_tensor_variable(x2)
        x3 = tensor.as_tensor_variable(x3)
        x4 = tensor.as_tensor_variable(x4)
        x5 = tensor.as_tensor_variable(x5)

        if prm.att_doc:
            if prm.compute_emb:
                td = tensor.itensor4().type()
            else:
                td = tensor.ftensor4().type()
            tm = tensor.ftensor3().type()
        else:
            if prm.compute_emb:
                td = tensor.itensor3().type()
            else:
                td = tensor.ftensor3().type()
            tm = tensor.fmatrix().type()
        return theano.Apply(self, [x, x2, x3, x4, x5],
                            [td, tm, tensor.fmatrix().type(), tensor.ivector().type()])
Project: Theano-Deep-learning | Author: GeekLiB
def test_local_lift_abstractconv_gpu_shape():
    prev = theano.config.on_opt_error
    try:
        theano.config.on_opt_error = 'raise'
        s = tensor.ivector()
        a = tensor.ftensor4()
        b = tensor.ftensor4()
        c = tensor.nnet.abstract_conv.AbstractConv2d_gradWeights()(a, b, s)
        theano.function([s, a, b], c, mode=mode_with_gpu)
    finally:
        theano.config.on_opt_error = prev
Project: Theano-Deep-learning | Author: GeekLiB
def test_GpuCumsum4D(self):
        # Should not use the GPU version.
        x = T.ftensor4('x')
        f = theano.function([x], cumsum(x, axis=1), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, CumsumOp)]
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_conv_merge():
    # This tests that multiple dnn_conv ops are merged correctly.
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc = dnn.GpuDnnConvDesc(
        border_mode='valid')(kern.shape)

    # Test forward op
    o1 = dnn.dnn_conv(img, kern)
    o2 = dnn.dnn_conv(img, kern)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1

    # Test grad w op
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1

    # Test grad i op
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_softmax(self):
        if not dnn.dnn_available(test_ctx_name):
            raise SkipTest(dnn.dnn_available.msg)
        t = T.ftensor4('t')
        rand_tensor = numpy.asarray(
            numpy.random.rand(5, 4, 3, 2),
            dtype='float32'
        )
        self._compile_and_check(
            [t],
            [dnn.GpuDnnSoftmax('accurate', 'channel')(t)],
            [rand_tensor],
            dnn.GpuDnnSoftmax
        )

        self._compile_and_check(
            [t],
            [
                T.grad(
                    dnn.GpuDnnSoftmax(
                        'accurate',
                        'channel'
                    )(t).mean(),
                    t
                )
            ],
            [rand_tensor],
            dnn.GpuDnnSoftmaxGrad
        )
Project: Theano-Deep-learning | Author: GeekLiB
def test_conv(self, algo, border_mode, conv_mode):
        if algo == 'winograd' and dnn.version(raises=False) < 5000:
            raise SkipTest(dnn.dnn_available.msg)

        self._test_conv(T.ftensor4('img'),
                        T.ftensor4('kerns'),
                        T.ftensor4('out'),
                        numpy.random.rand(7, 2, 8, 4),
                        numpy.random.rand(8, 2, 4, 3),
                        border_mode,
                        conv_mode,
                        [(1, 1), (2, 2)],
                        algo)
Project: Theano-Deep-learning | Author: GeekLiB
def test_conv_gradw(self, border_mode, conv_mode):
        self._test_conv_gradw(T.ftensor4('img'),
                              T.ftensor4('kerns'),
                              T.ftensor4('out'),
                              numpy.random.rand(2, 5, 6, 8),
                              numpy.random.rand(2, 1, 5, 6),
                              border_mode,
                              conv_mode,
                              (1, 1))
Project: Theano-Deep-learning | Author: GeekLiB
def test_pool(self):
        if not dnn.dnn_available(test_ctx_name):
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        img_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )

        # 'average_exc_pad' is disabled for versions < 4004
        if dnn.version(raises=False) < 4004:
            modes = ['max', 'average_inc_pad']
        else:
            modes = ['max', 'average_inc_pad', 'average_exc_pad']

        for params in product(
            [(1, 1), (2, 2), (3, 3)],
            [(1, 1), (2, 2), (3, 3)],
            modes
        ):
            self._compile_and_check(
                [img],
                [dnn.GpuDnnPool(mode=params[2])(img, params[0], params[1], (0, 0))],
                [img_val],
                dnn.GpuDnnPool
            )
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_conv_border_mode():
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()

    dnn.dnn_conv(img, kern, border_mode=1)
    dnn.dnn_conv(img, kern, border_mode=(2, 3))
    dnn.dnn_conv(img, kern, border_mode='full')
    dnn.dnn_conv(img, kern, border_mode='valid')
    dnn.dnn_conv(img, kern, border_mode='half')
Project: Theano-Deep-learning | Author: GeekLiB
def test_log_softmax(self):
        # This is a test for an optimization that depends on cuDNN v3 or
        # more recent. Don't test if the cuDNN version is too old.
        if dnn.version(raises=False) < 3000:
            raise SkipTest("Log-softmax is only in cudnn v3+")

        x = T.ftensor4()
        softmax_out = dnn.GpuDnnSoftmax('accurate', 'channel')(x)
        log_out = T.log(T.as_tensor_variable(softmax_out))

        f = theano.function([x], log_out, mode=mode_with_gpu)

        # Ensure that the optimization has been applied
        dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if
                             isinstance(n.op, dnn.GpuDnnSoftmax)]
        assert len(dnn_softmax_nodes) == 1
        assert dnn_softmax_nodes[0].op.algo == "log"

        # Ensure that the output of the function is valid
        input_shapes = [(3, 4, 5, 6),
                        (1025, 2, 3, 4),
                        (2, 1025, 3, 4),
                        (2, 3, 1025, 4),
                        (2, 3, 4, 1025),
                        (66000, 2, 3, 4),
                        (2, 66000, 3, 4),
                        (2, 3, 66000, 4),
                        (2, 3, 4, 66000)]

        for inp_shape in input_shapes:
            input_val = numpy.random.normal(0, 1, inp_shape).astype("float32")

            out = f(input_val)
            expected_out = numpy.log(numpy.exp(input_val) /
                                     numpy.exp(input_val).sum(1)[:, None, :, :])

            utt.assert_allclose(out, expected_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_old_pool_interface(self):
        if sys.version_info[0] != 3:
            # Only tested with python 3 because of pickling issues.
            raise SkipTest('Skip old pool interface with python 2.x')
        # 1. Load the old version
        testfile_dir = os.path.dirname(os.path.realpath(__file__))
        fname = 'old_pool_interface.pkl'
        with open(os.path.join(testfile_dir, fname), 'rb') as fp:
            try:
                old_fct = cPickle.load(fp, encoding='latin1')
            except ImportError:
                # Windows sometimes fail with nonsensical errors like:
                #   ImportError: No module named type
                #   ImportError: No module named copy_reg
                # when "type" and "copy_reg" are builtin modules.
                if sys.platform == 'win32':
                    exc_type, exc_value, exc_trace = sys.exc_info()
                    reraise(SkipTest, exc_value, exc_trace)
                raise
        # 2. Create the new version
        x = theano.tensor.ftensor4()
        y = pool_2d(x, (2, 2), mode='max', ignore_border=True)
        z = pool_2d(x, (2, 2), mode='average_exc_pad', ignore_border=True)
        dy_dx = theano.gradient.grad(y.sum(), x)
        dz_dx = theano.gradient.grad(z.sum(), x)
        new_fct = theano.function([x], [y, z, dy_dx, dz_dx])
        # 3. Assert that the answer is the same
        rng = numpy.random.RandomState(utt.fetch_seed())
        image_val = rng.rand(4, 6, 7, 9).astype(numpy.float32)
        old_out = old_fct(image_val)
        new_out = new_fct(image_val)
        for o, n in zip(old_out, new_out):
            utt.assert_allclose(o, n)
Project: Theano-Deep-learning | Author: GeekLiB
def test_sparseblockgemvF(self):
        """
            Test the Fortran order for W (which can happen in the grad for some
            graphs).
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0),
                         tensor.DimShuffle((False, False, False, False),
                                           (0, 1, 3, 2))
                         (tensor.as_tensor_variable(W)),
                         h, iIdx, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(numpy.swapaxes(W_val, 2, 3), h_val, iIdx_val, b_val,
                   oIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_gemv_infershape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        self._compile_and_check(
            [W, h, iIdx, b, oIdx],
            [self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)],
            self.gemv_data(),
            self.gemv_class)
Project: Theano-Deep-learning | Author: GeekLiB
def test_outer_infershape(self):
        o = tensor.ftensor4()
        x = tensor.ftensor3()
        y = tensor.ftensor3()
        xIdx = tensor.imatrix()
        yIdx = tensor.imatrix()

        self._compile_and_check([o, x, y, xIdx, yIdx],
                                [self.outer_op(o, x, y, xIdx, yIdx)],
                                self.outer_data(),
                                self.outer_class)
Project: Theano-Deep-learning | Author: GeekLiB
def test_infer_shape(self):
        shape = (100, 40, 6, 3)
        images = numpy.ones(shape).astype('float32')
        x = T.ftensor4()
        self._compile_and_check(
            [x], [images2neibs(x, neib_shape=(2, 1), mode='valid')],
            [images], Images2Neibs)
        self._compile_and_check(
            [x], [images2neibs(x, neib_shape=(2, 3), mode='valid')],
            [images], Images2Neibs)
        shape = (100, 40, 5, 4)
        images = numpy.ones(shape).astype('float32')
        x = T.ftensor4()
        self._compile_and_check(
            [x], [images2neibs(
                x, neib_shape=(2, 1), mode='ignore_borders')],
            [images], Images2Neibs)
        shape = (100, 40, 5, 3)
        images = numpy.ones(shape).astype('float32')
        x = T.ftensor4()
        self._compile_and_check(
            [x], [images2neibs(
                x, neib_shape=(2, 3), mode='ignore_borders')],
            [images], Images2Neibs)

        shape = (100, 40, 6, 7)
        images = numpy.ones(shape).astype('float32')
        x = T.ftensor4()
        self._compile_and_check(
            [x], [images2neibs(
                x, neib_shape=(2, 2), mode='ignore_borders')],
            [images], Images2Neibs)
        shape = (100, 40, 5, 10)
        images = numpy.ones(shape).astype('float32')
        x = T.ftensor4()
        self._compile_and_check(
            [x], [images2neibs(
                x, neib_shape=(3, 3), mode='wrap_centered')],
            [images], Images2Neibs)
Project: Theano-Deep-learning | Author: GeekLiB
def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)

    assert sum(1 for n in f.maker.fgraph.apply_nodes
               if isinstance(n.op, GpuSparseBlockGemv)) == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_blocksparse_gpu_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], [o, tensor.grad(o.sum(),
                                                               wrt=W)],
                        mode=mode_with_gpu)

    assert sum(1 for n in f.maker.fgraph.apply_nodes
               if isinstance(n.op, GpuSparseBlockOuter)) == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_GpuCumsum4D(self):
        # Should not use the GPU version.
        x = T.ftensor4('x')
        f = theano.function([x], cumsum(x, axis=1), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, CumsumOp)]
Project: Theano-Deep-learning | Author: GeekLiB
def _run_onecase(self, img_shape, kern_shape, padding, op):
        npy_img = numpy.random.rand(*img_shape).astype('float32')
        npy_kern = numpy.random.rand(*kern_shape).astype('float32')
        img = theano._asarray(npy_img, dtype='float32')
        kern = theano.shared(npy_kern)
        border_mode = padding
        cpuval = py_conv(npy_img, npy_kern, border_mode, (1, 1))
        X = tensor.ftensor4()
        Y = op(X, kern, border_mode=border_mode)
        func = theano.function([X], Y, mode=theano_mode)
        gpuval = numpy.asarray(func(img))
        assert_allclose(cpuval, gpuval, rtol=1e-5, atol=1e-5)
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_conv_merge():
    """This tests that multiple dnn_conv ops are merged correctly.

    This case is more difficult because of the GpuAllocEmpty nodes,
    which aren't merged.

    """
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc = dnn.GpuDnnConvDesc(
        border_mode='valid')(img.shape, kern.shape)

    # Test forward op
    o1 = dnn.dnn_conv(img, kern)
    o2 = dnn.dnn_conv(img, kern)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1

    # Test grad w op
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1

    # Test grad i op
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_log_softmax(self):
        # This is a test for an optimization that depends on cuDNN v3 or
        # more recent. Don't test if the cuDNN version is too old.
        if cuda.dnn.version() < (3000, 3000):
            raise SkipTest("Log-softmax is only in cudnn v3+")

        x = T.ftensor4()
        softmax_out = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(x)
        log_out = T.log(T.as_tensor_variable(softmax_out))

        f = theano.function([x], log_out, mode=mode_with_gpu)

        # Ensure that the optimization has been applied
        dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if
                             isinstance(n.op, cuda.dnn.GpuDnnSoftmax)]
        assert len(dnn_softmax_nodes) == 1
        assert dnn_softmax_nodes[0].op.algo == "log"

        # Ensure that the output of the function is valid
        input_shapes = [(3, 4, 5, 6),
                        (1025, 2, 3, 4),
                        (2, 1025, 3, 4),
                        (2, 3, 1025, 4),
                        (2, 3, 4, 1025),
                        (66000, 2, 3, 4),
                        (2, 66000, 3, 4),
                        (2, 3, 66000, 4),
                        (2, 3, 4, 66000)]

        for inp_shape in input_shapes:
            input_val = numpy.random.normal(0, 1, inp_shape).astype("float32")

            out = f(input_val)
            expected_out = numpy.log(
                numpy.exp(input_val) /
                numpy.exp(input_val).sum(1)[:, None, :, :])

            utt.assert_allclose(out, expected_out)
Project: Theano-Deep-learning | Author: GeekLiB
def test_dnn_tag():
    """
    Test that if cudnn isn't avail we crash and that if it is avail, we use it.
    """
    x = T.ftensor4()
    old = theano.config.on_opt_error
    theano.config.on_opt_error = "raise"

    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)
    # Silence original handler when intentionally generating warning messages
    logging.getLogger('theano').removeHandler(theano.logging_default_handler)
    raised = False
    try:
        f = theano.function(
            [x],
            pool_2d(x, ds=(2, 2), ignore_border=True),
            mode=mode_with_gpu.including("cudnn"))
    except (AssertionError, RuntimeError):
        assert not cuda.dnn.dnn_available()
        raised = True
    finally:
        theano.config.on_opt_error = old
        logging.getLogger(
            'theano.compile.tests.test_dnn').removeHandler(handler)
        logging.getLogger('theano').addHandler(theano.logging_default_handler)

    if not raised:
        assert cuda.dnn.dnn_available()
        assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
                    for n in f.maker.fgraph.toposort()])
Project: Theano-Deep-learning | Author: GeekLiB
def test_conv(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        out = T.ftensor4('out')
        img_val = numpy.asarray(
            numpy.random.rand(10, 2, 6, 4),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(8, 2, 4, 3),
            dtype='float32'
        )

        for params in product(
            ['valid', 'full', 'half'],
            [(1, 1), (2, 2)],
            ['conv', 'cross']
        ):
            out_vals = numpy.zeros(
                dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,
                                             border_mode=params[0],
                                             subsample=params[1]),
                dtype='float32')
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(img.shape, kerns.shape)
            conv = dnn.GpuDnnConv()(img, kerns, out, desc)
            self._compile_and_check(
                [img, kerns, out],
                [conv],
                [img_val, kern_vals, out_vals],
                dnn.GpuDnnConv
            )
Project: Theano-Deep-learning | Author: GeekLiB
def test_pool(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        img_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )

        # 'average_exc_pad' is disabled for versions < 4004
        if cuda.dnn.version() < (4004, 4004):
            modes = ['max', 'average_inc_pad']
        else:
            modes = ['max', 'average_inc_pad', 'average_exc_pad']

        for params in product(
            [(1, 1), (2, 2), (3, 3)],
            [(1, 1), (2, 2), (3, 3)],
            modes
        ):
            self._compile_and_check(
                [img],
                [dnn.GpuDnnPool(mode=params[2])
                               (img, params[0], params[1], (0, 0))],
                [img_val],
                dnn.GpuDnnPool
            )
Project: Theano-Deep-learning | Author: GeekLiB
def test_pool_grad(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        img_grad = T.ftensor4('img_grad')
        out = T.ftensor4('out')
        img_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )
        img_grad_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )
        out_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )

        for params in product(
            [(1, 1), (2, 2), (3, 3)],
            [(1, 1), (2, 2), (3, 3)],
            ['max', 'average_inc_pad']
        ):
            pool_grad = dnn.GpuDnnPoolGrad()(
                img,
                out,
                img_grad,
                params[0],
                params[1],
                (0, 0)
            )
            self._compile_and_check(
                [img, img_grad, out],
                [pool_grad],
                [img_val, img_grad_val, out_val],
                dnn.GpuDnnPoolGrad
            )
Project: crayimage | Author: yandexdataschool
def test_generators(self):
    from crayimage.cosmicGAN import ToyTrueTrackGenerator, ToyTrackGenerator
    X = T.ftensor4()
    _ = ToyTrueTrackGenerator(X, input_shape=(1, 128, 128))
    _ = ToyTrackGenerator(X, input_shape=(1, 128, 128))
Project: Synkhronos | Author: astooke
def build_training(resnet, params, update_rule, **update_kwargs):

    print("Building training functions")
    x = T.ftensor4('x')
    y = T.imatrix('y')

    prob = L.get_output(resnet['prob'], x, deterministic=False)
    loss = T.nnet.categorical_crossentropy(prob, y.flatten()).mean()

    grad_updates, param_updates, grad_shared = \
        update_rule(loss, params, **update_kwargs)
    # make a function to compute and store the raw gradient
    f_grad_shared = theano.function(inputs=[x, y],
                                    outputs=loss,  # (assumes this is an avg)
                                    updates=grad_updates)
    # make a function to update parameters using stored gradient
    f_param_update = theano.function(inputs=[], updates=param_updates)

    def f_train_minibatch(x_data, y_data):
        train_loss = f_grad_shared(x_data, y_data)
        # No all-reduce here; single-GPU
        f_param_update()
        return train_loss

    print("Building validation / test function")
    v_prob = L.get_output(resnet['prob'], x, deterministic=True)
    v_loss = T.nnet.categorical_crossentropy(v_prob, y.flatten()).mean()
    v_mc = T.mean(T.neq(T.argmax(v_prob, axis=1), y.flatten()))
    f_predict = theano.function(inputs=[x, y], outputs=[v_loss, v_mc])

    return f_train_minibatch, f_predict
Project: Synkhronos | Author: astooke
def build_training(resnet, params, update_rule, **update_kwargs):

    print("Building training functions")
    x = T.ftensor4('x')
    y = T.imatrix('y')

    prob = L.get_output(resnet['prob'], x, deterministic=False)
    loss = T.nnet.categorical_crossentropy(prob, y.flatten()).mean()

    grad_updates, param_updates, grad_shared = \
        update_rule(loss, params, **update_kwargs)
    # make a function to compute and store the worker's raw gradient
    f_grad_shared = synk.function(inputs=[x, y],
                                  outputs=loss,  # (assumes this is an avg)
                                  updates=grad_updates)
    # make a function to update worker's parameters using stored gradient
    f_param_update = synk.function(inputs=[], updates=param_updates)

    def f_train_minibatch(x_data, y_data, batch):
        # compute worker gradient; average across GPUs; update worker params
        # (alternatively, could update parameters only in master, then broadcast,
        # but that costs more communication)
        train_loss = f_grad_shared(x_data, y_data, batch=batch)
        synk.all_reduce(grad_shared, op="avg")  # (assumes loss is an avg)
        f_param_update()
        return train_loss

    print("Building validation / test function")
    v_prob = L.get_output(resnet['prob'], x, deterministic=True)
    v_loss = T.nnet.categorical_crossentropy(v_prob, y.flatten()).mean()
    v_mc = T.mean(T.neq(T.argmax(v_prob, axis=1), y.flatten()))
    f_predict = synk.function(inputs=[x, y], outputs=[v_loss, v_mc])

    return f_train_minibatch, f_predict
Project: cbof | Author: passalis
def __init__(self, n_classes=10, learning_rate=0.00001, bof_layer=(4, 0, 128), hidden_neurons=(1000,),
                 dropout=(0.5,), feature_dropout=0, g=0.1):

        Base_Learner.__init__(self)

        input_var = T.ftensor4('inputs')
        target_var = T.ivector('targets')

        # Create the CNN feature extractor
        self.cnn_layer = CNN_Feature_Extractor(input_var, size=None)

        # Create the BoF layer
        (cnn_layer_id, spatial_level, n_codewords) = bof_layer
        self.bof_layer = CBoF_Input_Layer(input_var, self.cnn_layer, cnn_layer_id, level=spatial_level,
                                          n_codewords=n_codewords, g=g, pyramid=False)
        features = self.bof_layer.fused_features
        n_size_features = self.bof_layer.features_size

        # Create an output MLP
        network = lasagne.layers.InputLayer(shape=(None, n_size_features), input_var=features)
        if feature_dropout > 0:
            network = lasagne.layers.DropoutLayer(network, p=feature_dropout)
        for n, drop_rate in zip(hidden_neurons, dropout):
            network = lasagne.layers.DenseLayer(network, num_units=n, nonlinearity=lasagne.nonlinearities.elu,
                                                W=lasagne.init.Orthogonal())
            network = lasagne.layers.DropoutLayer(network, p=drop_rate)

        network = lasagne.layers.DenseLayer(network, num_units=n_classes,
                                            nonlinearity=lasagne.nonlinearities.softmax,
                                            W=lasagne.init.Normal(std=1))
        # Get network loss
        self.prediction_train = lasagne.layers.get_output(network, deterministic=False)
        loss = lasagne.objectives.categorical_crossentropy(self.prediction_train, target_var).mean()

        # Define training rules
        params_mlp = lasagne.layers.get_all_params(network, trainable=True)
        updates_mlp = lasagne.updates.adam(loss, params_mlp, learning_rate=learning_rate)
        updates = lasagne.updates.adam(loss, params_mlp, learning_rate=learning_rate)
        updates.update(lasagne.updates.adam(loss, self.cnn_layer.layer_params[cnn_layer_id],
                                            learning_rate=learning_rate))
        updates.update(lasagne.updates.adam(loss, self.bof_layer.V, learning_rate=learning_rate))
        updates.update(lasagne.updates.adam(loss, self.bof_layer.sigma, learning_rate=learning_rate))

        # Define testing/validation
        prediction_test = lasagne.layers.get_output(network, deterministic=True)

        # Compile functions
        self.train_fn = theano.function([input_var, target_var], loss, updates=updates)
        self.train_mlp_fn = theano.function([input_var, target_var], loss, updates=updates_mlp)
        self.test_fn = theano.function([input_var], T.argmax(prediction_test, axis=1))

        # Get the output of the bof module
        self.get_features_fn = theano.function([input_var], features)
Project: crossingNet | Author: melonwan
def genLossAndGradient(self):
        #establish loss
        self.pose_input_var = self.pose_vae.pose_input_var
        self.noise_input_var = self.pose_vae.noise_input_var
        self.real_depth_var = T.ftensor4('real_depth')
        self.pixel_loss = lasagne.objectives.squared_error(self.render_var,
                                                self.real_depth_var)
        self.pixel_loss = lasagne.objectives.aggregate(self.pixel_loss, 
                                                      mode='mean')

        #calculate gradient
        print('param: {}'.format(self.params))
        self.updates = lasagne.updates.adam(self.pixel_loss, self.params,
                                            self.lr, self.b1)
        #compile function
        self.train_fn = theano.function(
            [self.pose_input_var, 
             self.origin_input_var,
             # self.quad_input_var,
             self.noise_input_var, 
             self.real_depth_var],
            self.pixel_loss,
            updates = self.updates
        )
        self.render_fn = theano.function(
            [self.pose_input_var,
             self.origin_input_var,
             # self.quad_input_var,
             self.noise_input_var],
            self.render_tvar
        )

        updates = lasagne.updates.adam(self.pixel_loss, self.alignment_params,
                                      self.lr, self.b1) 
        self.alignment_train_fn = theano.function(
            [self.pose_input_var, 
             self.origin_input_var,
             # self.quad_input_var,
             self.noise_input_var, 
             self.real_depth_var],
            self.pixel_loss,
            updates = updates
        )
        print('function compiled')
Project: crossingNet | Author: melonwan
def genTestEstGrad(self):
        def updateFn(est_z_var, given_x_var):
            # output of render image given est_z_var
            align_var = lasagne.layers.get_output(self.alignment_layer,
                                                  est_z_var,
                                                  deterministic=True)
            est_x_var = lasagne.layers.get_output(self.render_layer,
                                                  inputs=align_var,
                                                  deterministic=True)
            # test_pixel_loss = abs(given_x_var - est_x_var)
            test_pixel_loss = (given_x_var - est_x_var)**2
            # test_pixel_loss = T.clip(test_pixel_loss, 0, self.golden_max)
            test_pixel_loss = lasagne.objectives.aggregate(
                test_pixel_loss, mode='mean')

            est_feat_var = lasagne.layers.get_output(self.metric_layer,
                                                    inputs=est_x_var,
                                                    deterministic=True)
            giv_feat_var = lasagne.layers.get_output(self.metric_layer,
                                                    inputs=given_x_var,
                                                    deterministic=True)
            if self.metricCombi:
                metric_combi_var = T.concatenate([giv_feat_var,est_feat_var],
                                                axis=1)
                learned_update = lasagne.layers.get_output(self.metric_combilayer,
                                                          metric_combi_var)
            else:
                learned_update = giv_feat_var - est_feat_var 

            learned_update *= -1

            test_metric_loss = learned_update**2
            test_metric_loss = T.mean(test_metric_loss)

            # test_loss = test_pixel_loss + test_metric_loss*0.1
            test_loss = test_metric_loss
            # test_loss = test_pixel_loss

            # dz_var = theano.grad(test_loss, est_z_var)
            dz_var = learned_update 
            return est_z_var-0.1*dz_var

        # our initial estimation
        est_z_var = T.fmatrix('est_z')
        # given image during test time
        given_x_var = T.ftensor4('given_x')
        # number of updates
        K = T.iscalar('K')
        new_z, updates = theano.scan(
            fn=updateFn,
            outputs_info=[est_z_var],
            non_sequences=[given_x_var],
            n_steps=K
        )
        new_z = new_z[-1]
        print('scan loop calculated')

        self.search_fn = theano.function(inputs=[est_z_var, given_x_var, K], 
                                         outputs=new_z,
                                         updates=updates)
        print('scan gradient function compiled')
Project: dsde-deep-learning | Author: broadinstitute
def run_cnn_on_mnist():
    train, test, valid = load_data('mnist.pkl.gz')

    trX = train[0]
    trY = make_one_hot(train[1])

    teX = test[0]
    teY = make_one_hot(test[1])

    print('trx shape b4:', trX.shape)
    print('trY shape b4:', trY.shape)
    trX = trX.reshape(-1, 1, 28, 28)
    teX = teX.reshape(-1, 1, 28, 28)

    print('trx shape after:', trX.shape)

    X = T.ftensor4()
    Y = T.fmatrix()

    w1 = init_weights((128, 1, 3, 3))
    w2 = init_weights((128, 128, 3, 3))
    w3 = init_weights((128, 128, 3, 3))
    w4 = init_weights((128 * 3 * 3, 1024))
    w_o = init_weights((1024, 10))

    l1n, l2n, l3n, l4n, py_x = cnn_model(X, w1, w2, w3, w4, w_o, 0.2, 0.5)
    l1, l2, l3, l4, py_x = cnn_model(X, w1, w2, w3, w4, w_o, 0.0, 0.0)
    y_x = T.argmax(py_x, axis=1)

    cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
    params =[w1, w2, w3, w4, w_o]
    updates = RMSprop(cost, params)

    train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
    predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)
    get_hidden_weights = theano.function(inputs=[], outputs=w_o)

    batch_size = 128
    print('start training...')
    for i in range(100):
        for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)):
            cost = train(trX[start:end], trY[start:end])
        print('On training batch', i, 'training accuracy', np.mean(np.argmax(trY[start:end], axis=1) == predict(trX[start:end])))
        print('Average validation accuracy', np.mean(np.argmax(teY, axis=1) == predict(teX)))