Python theano.tensor module, fmatrix() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.fmatrix().
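
For orientation before the examples: T.fmatrix() declares a symbolic two-dimensional float32 variable that theano.function can compile against. A minimal, self-contained sketch (the variable names here are illustrative, not taken from any of the examples below):

import numpy as np
import theano
import theano.tensor as T

x = T.fmatrix("x")  # symbolic 2-D variable with dtype float32
y = T.fmatrix("y")
z = x + y           # build a symbolic expression graph
f = theano.function([x, y], z)  # compile the graph into a callable

a = np.ones((2, 3), dtype=np.float32)
b = np.full((2, 3), 2.0, dtype=np.float32)
print(f(a, b))  # prints a (2, 3) array filled with 3.0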

Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def get_eval_fn(model, in3D=False, use_dice=False):
    """Compile the evaluation function of the model."""
    if use_dice:
        insec = T.sum(model.trg * model.output, axis=1)
        tmp = 1 - 2.0 * insec/(T.sum(model.trg, axis=1) + T.sum(model.output,
                               axis=1))
        error = T.mean(tmp)
    else:
        error = T.mean(T.mean(T.power(model.output - model.trg, 2), axis=1))
    if in3D:
        x = T.tensor4('x')
    else:
        x = T.fmatrix("x")
    y = T.fmatrix("y")

    theano_arg_vl = [x, y]
    output_fn_vl = [error, model.output]

    eval_fn = theano.function(
        theano_arg_vl, output_fn_vl,
        givens={model.x: x,
                model.trg: y})

    return eval_fn
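
A hypothetical call to the compiled evaluation function might look as follows; the model object, shapes, and array names are assumptions for illustration, not part of this project's source:

# eval_fn = get_eval_fn(model)            # model exposes .x, .trg and .output
# x_val = np.random.rand(8, 784).astype('float32')   # assumed (batch, features)
# y_val = np.random.rand(8, 784).astype('float32')
# err, pred = eval_fn(x_val, y_val)       # mean error and the raw predictions
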
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def sample_scan(self, x, sigma, n_steps, samples):
        # Enable on-the-fly graph computations
        #  theano.config.compute_test_value = "raise"
        in_val = T.fmatrix("input_values")
        # in_val.tag.test_value = np.asarray(
        #    np.random.rand(1, 784), dtype=theano.config.floatX)
        s_sigma = T.fscalar("sigma_values")
        # s_sigma = np.asarray(
        #    np.random.rand(1), dtype=theano.config.floatX)
        mode = "FAST_RUN"
        values, updates = theano.scan(fn=self.sample_one_step,
                                      outputs_info=in_val,
                                      non_sequences=s_sigma,
                                      n_steps=n_steps,
                                      mode=mode)
        ae_sampler = theano.function(inputs=[in_val, s_sigma],
                                     outputs=values[-1],
                                     updates=updates)
        samples = ae_sampler(x, sigma)
        return samples
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def sample_old(self, x, sigma, n_steps):
        # Enable on-the-fly graph computations
        # theano.config.compute_test_value = "raise"
        # in_val = T.fmatrix('input_values")
        # in_val.tag.test_value = np.asarray(
        #   np.random.rand(1, 784), dtype=theano.config.floatX)
        # s_sigma = T.fscalar("sigma_value")
        # s_sigma = np.asarray(
        #   np.random.rand(1), dtype=theano.config.floatX)
        # mode = "FAST_RUN"
        samples = []
        sample = x
        samples.append(x)
        for i in xrange(n_steps):
            print "Sample %d ..." % i
            sampler = self.sample_one_step(sample, sigma)
            sample = sampler.eval()
            samples.append(sample)
        return samples
Project: e2e-ie-release    Author: rasmusbergpalm    | Project source | File source
def test_get_output_for(self):
        keys_var = T.ftensor3()
        values_var = T.ftensor3()
        mask_var = T.fmatrix()
        queries_var = T.ftensor3()

        keys_layer = L.InputLayer((None, None, 3), input_var=keys_var)
        values_layer = L.InputLayer((None, None, 5), input_var=values_var)
        mask_layer = L.InputLayer((None, None), input_var=mask_var)
        queries_layer = L.InputLayer((None, None, 7), input_var=queries_var)

        attention_layer = BahdanauKeyValueAttentionLayer([keys_layer, values_layer, mask_layer, queries_layer], 9)

        attention_outputs = L.get_output(attention_layer)

        fn = theano.function([keys_var, values_var, mask_var, queries_var], attention_outputs, on_unused_input='warn')

        keys = np.random.rand(32, 13, 3).astype(np.float32)
        values = np.random.rand(32, 13, 5).astype(np.float32)
        mask = np.random.rand(32, 13).astype(np.float32)
        queries = np.random.rand(32, 17, 7).astype(np.float32)

        _att = fn(keys, values, mask, queries)

        self.assertEqual((32, 17, 5), _att.shape)
Project: peters-stuff    Author: peterneher    | Project source | File source
def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute the dice. the result of the softmax and the ground truth.")

        if len(bottom[0].data.shape)==4 :
            self.prediction = T.fmatrix()
            self.ground_truth = T.fmatrix()
        elif len(bottom[0].data.shape)==5 :
            self.prediction = T.ftensor3()
            self.ground_truth = T.ftensor3()
        else:
            raise Exception('DiceIndexLayer only supports 2D or 3D data at the moment.')

        intersection = T.sum(self.prediction * self.ground_truth)
        denominator = T.sum(self.prediction) + T.sum(self.ground_truth)
        dice = 2 * intersection / (denominator + 0.00001)

        self.f = theano.function([self.prediction, self.ground_truth], dice)

        top[0].reshape(1)
Project: visually-grounded-speech    Author: gchrupala    | Project source | File source
def __init__(self, config):
        autoassign(locals())
        self.margin_size = config.get('margin_size', 0.2)
        self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
        self.Encode = Encoder(config['size_vocab'],
                              config['size_embed'], 
                              config['size'],
                              depth=config.get('depth', 1),
                              recur_depth=config.get('recur_depth',1),
                              drop_i=config.get('drop_i', 0.75),
                              drop_s=config.get('drop_s', 0.25),
                              residual=config.get('residual', False),
                              seed=config.get('seed', 1))                              
        self.ImgEncoder  = Dense(config['size_target'], config['size'], init=eval(config.get('init_img', 'orthogonal')))
        self.inputs = [T.imatrix()]
        self.target = T.fmatrix()
Project: visually-grounded-speech    Author: gchrupala    | Project source | File source
def __init__(self, config):
        autoassign(locals())
        self.margin_size = config.get('margin_size', 0.2)
        self.updater = util.Adam(max_norm=config['max_norm'], lr=config['lr'])
        self.Encode = Encoder(config['size_vocab'],
                              config['size'],
                              filter_length=config.get('filter_length', 6),
                              filter_size=config.get('filter_size', 1024),
                              stride=config.get('stride', 3),
                              depth=config.get('depth', 1),
                              recur_depth=config.get('recur_depth',1),
                              drop_i=config.get('drop_i', 0.75),
                              drop_s=config.get('drop_s', 0.25),
                              residual=config.get('residual', False),
                              seed=config.get('seed', 1))
        self.Attn   = Attention(config['size'], size=config.get('size_attn', 512))
        self.ImgEncoder  = Dense(config['size_target'], config['size'])
        self.inputs = [T.ftensor3()]
        self.target = T.fmatrix()
Project: learning-class-invariant-features    Author: sbelharbi    | Project source | File source
def sample_scan(self, x, sigma, n_steps, samples):
        # Enable on-the-fly graph computations
        #  theano.config.compute_test_value = "raise"
        in_val = T.fmatrix("input_values")
        # in_val.tag.test_value = np.asarray(
        #    np.random.rand(1, 784), dtype=theano.config.floatX)
        s_sigma = T.fscalar("sigma_values")
        # s_sigma = np.asarray(
        #    np.random.rand(1), dtype=theano.config.floatX)
        mode = "FAST_RUN"
        values, updates = theano.scan(fn=self.sample_one_step,
                                      outputs_info=in_val,
                                      non_sequences=s_sigma,
                                      n_steps=n_steps,
                                      mode=mode)
        ae_sampler = theano.function(inputs=[in_val, s_sigma],
                                     outputs=values[-1],
                                     updates=updates)
        samples = ae_sampler(x, sigma)
        return samples
Project: learning-class-invariant-features    Author: sbelharbi    | Project source | File source
def sample_old(self, x, sigma, n_steps):
        # Enable on-the-fly graph computations
        # theano.config.compute_test_value = "raise"
        # in_val = T.fmatrix('input_values")
        # in_val.tag.test_value = np.asarray(
        #   np.random.rand(1, 784), dtype=theano.config.floatX)
        # s_sigma = T.fscalar("sigma_value")
        # s_sigma = np.asarray(
        #   np.random.rand(1), dtype=theano.config.floatX)
        # mode = "FAST_RUN"
        samples = []
        sample = x
        samples.append(x)
        for i in xrange(n_steps):
            print "Sample %d ..." % i
            sampler = self.sample_one_step(sample, sigma)
            sample = sampler.eval()
            samples.append(sample)
        return samples
Project: lstmprovisor-python    Author: Impro-Visor    | Project source | File source
def test_frag_queue():
    feature_strengths = T.fmatrix()
    feature_vects = T.ftensor3()
    peek_strengths, res = QueueManager.queue_transform(feature_strengths, feature_vects, True)
    grad_s, grad_v = theano.gradient.grad(T.sum(res[:,:,1]), [feature_strengths,feature_vects])

    fun = theano.function([feature_strengths, feature_vects], [peek_strengths, res, grad_s, grad_v], allow_input_downcast=True)

    mystrengths = np.array([[0.3,0.3,0.2,0.6,0.3,0.7,0.2,1], [0.3,0.3,0.2,0.6,0.3,0.7,0.2,1]], np.float32)
    myvects = np.tile(np.eye(8, dtype=np.float32), (2,1,1))
    mypeek, myres, mygs, mygv = fun(mystrengths, myvects)

    print(mypeek)
    print(myres)
    print(mygs)
    print(mygv)
    return mypeek, myres, mygs, mygv
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_remove_all_assert():
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())

    # By default `unsafe` should not be there
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1

    # Put `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 0

    # Remove `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, GpuElemwise)
    assert topo[-1].op == host_from_gpu
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_fail_select_alot(self):
        """
        Tests that MultinomialWOReplacementFromUniform fails when asked to sample more
        elements than the actual number of elements
        """
        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        numpy.random.seed(12345)
        uni = numpy.random.rand(n_selected).astype(config.floatX)
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        self.assertRaises(ValueError, f, pvals, uni, n_selected)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_select_distinct(self):
        """
        Tests that multinomial_wo_replacement always selects distinct elements
        """
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        numpy.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, i)
            res = numpy.squeeze(res)
            assert len(res) == i
            assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_fail_select_alot(self):
        """
        Tests that multinomial_wo_replacement fails when asked to sample more
        elements than the actual number of elements
        """
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        numpy.random.seed(12345)
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        self.assertRaises(ValueError, f, pvals, n_selected)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_Strides2D(self):
        x = T.fmatrix('x')

        for axis in [0, 1, None, -1, -2]:
            a = np.random.random((42, 30)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                              mode=self.mode)

            slicings = [slice(None, None, None),    # Normal strides
                        slice(None, None, 2),       # Stepped strides
                        slice(None, None, -1),      # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                    cumsum_function(a[slicing]))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_transfer_strided():
    # This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
    # ensure correctness
    a = T.fmatrix('a')
    g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')

    av = numpy.asarray(rng.rand(5, 8), dtype='float32')
    gv = gpuarray.array(av, context=get_context(test_ctx_name))

    av = av[:, ::2]
    gv = gv[:, ::2]

    f = theano.function([a], GpuFromHost(test_ctx_name)(a))
    fv = f(av)
    assert GpuArrayType.values_eq(fv, gv)

    f = theano.function([g], host_from_gpu(g))
    fv = f(gv)
    assert numpy.all(fv == av)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def setUp(self):

        super(TestPdbBreakpoint, self).setUp()

        # Sample computation that involves tensors with different numbers
        # of dimensions
        self.input1 = T.fmatrix()
        self.input2 = T.fscalar()
        self.output = T.dot((self.input1 - self.input2),
                            (self.input1 - self.input2).transpose())

        # Declare the conditional breakpoint
        self.breakpointOp = PdbBreakpoint("Sum of output too high")
        self.condition = T.gt(self.output.sum(), 1000)
        (self.monitored_input1,
         self.monitored_input2,
         self.monitored_output) = self.breakpointOp(self.condition,
                                                    self.input1,
                                                    self.input2, self.output)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_dot22scalar_cast():
    """
    Test that in `dot22_to_dot22scalar` we properly cast integers to floats.
    """
    # Note that this test was failing before d5ff6904.
    A = T.dmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
    A = T.fmatrix()
    for scalar_int_type in T.int_dtypes:
        y = T.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], T.dot(A, A) * y, mode=mode_blas_opt)
        if scalar_int_type in ['int32', 'int64']:
            assert _dot22 in [x.op for x in f.maker.fgraph.toposort()]
        else:
            assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_scalar_axes(self):
        # Test matrix-matrix
        amat = fmatrix()
        bmat = dmatrix()
        # We leave bmat as float64 to test a mix of float32 and float64.
        axes = 1
        aval = rand(4, 5).astype('float32')
        bval = rand(5, 3)
        c = tensordot(amat, bmat, axes)
        f3 = inplace_func([amat, bmat], c)
        self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                       f3(aval, bval)))
        utt.verify_grad(self.TensorDot(axes), [aval, bval])

        # Test tensor-tensor
        amat = tensor3()
        bmat = tensor3()
        axes = 2
        aval = rand(3, 4, 5)
        bval = rand(4, 5, 3)
        c = tensordot(amat, bmat, axes)
        f3 = inplace_func([amat, bmat], c)
        self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                       f3(aval, bval)))
        utt.verify_grad(self.TensorDot(axes), [aval, bval])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv])
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=[sparse_block_gemv_inplace])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_blocksparse_inplace_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx],
                        [o, tensor.grad(o.sum(), wrt=W)])

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer)
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
        assert check_stack_trace(f, ops_to_check=sparse_block_outer_inplace)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_sparseblockdot(self):
        """
        Compares the numpy version of sparseblockgemv to sparse_block_dot.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = sparse_block_dot(W, h, iIdx, b, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_sparseblockgemv(self):
        """
        Compares the numpy and theano versions of sparseblockgemv.
        """
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        f = theano.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
        ref_out = BlockSparse_Gemv_and_Outer.gemv_numpy(
            b_val.take(oIdx_val, axis=0), W_val, h_val, iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_sparseblockgemv_grad_shape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
        go = theano.grad(o.sum(), [b, W, h])

        f = theano.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = \
            BlockSparse_Gemv_and_Outer.gemv_data()

        # Just make sure that it runs correctly and all the shapes are OK.
        b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        assert b_g.shape == b_val.shape
        assert h_g.shape == h_val.shape
        assert W_g.shape == W_val.shape
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_bugFunctionProvidesIntermediateNodesAsInputs(self):
        # This is a bug recently reported by Ilya
        # made it CPU friendly
        V = tensor.ftensor3('INPUT')
        orig = tensor.fmatrix('PARAM')
        # W = gpu_from_host(orig)  # <-- this doesn't work
        W = orig + 2  # <-- has same effect but it works on CPU as well
        # W = T.fmatrix('PARAM') # <-- this line works

        def one_step(v, W):
            o = v + 1 + W.sum()  # <-- this doesn't work
            # o = v + 1  # <-- this line works
            return o

        OS, updates = theano.scan(
            fn=one_step,
            sequences=V,
            outputs_info=[None],
            non_sequences=[W])

        O = OS.sum() + W.sum()

        # This bug manifests itself by not allowing the function to compile,
        # so if it compiles it means the test passes
        f = theano.function([V, W], O)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_select_distinct(self):
        """
        Tests that MultinomialWOReplacementFromUniform always selects distinct elements
        """
        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.MultinomialWOReplacementFromUniform('auto')(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        numpy.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            uni = numpy.random.rand(i).astype(config.floatX)
            pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, uni, i)
            res = numpy.squeeze(res)
            assert len(res) == i
            assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_select_distinct(self):
        """
        Tests that multinomial_wo_replacement always selects distinct elements
        """
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        numpy.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, i)
            res = numpy.squeeze(res)
            assert len(res) == i
            assert numpy.all(numpy.in1d(numpy.unique(res), all_indices)), res
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_fail_select_alot(self):
        """
        Tests that multinomial_wo_replacement fails when asked to sample more
        elements than the actual number of elements
        """
        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        numpy.random.seed(12345)
        pvals = numpy.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        self.assertRaises(ValueError, f, pvals, n_selected)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_remove_all_assert():
    x = theano.tensor.fmatrix()
    a = theano.tensor.opt.assert_op(x, theano.tensor.eq(x, 0).any())

    # By default `unsafe` should not be there
    f = theano.function([x], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1

    # Put `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.including('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 0

    # Remove `unsafe`
    f = theano.function([x], a, mode=mode_with_gpu.excluding('unsafe'))
    topo = f.maker.fgraph.toposort()
    a_op = [n for n in topo if isinstance(n.op, theano.tensor.opt.Assert)]
    assert len(a_op) == 1
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_gpu_elemwise_careduce():
    x = theano.tensor.fmatrix()
    o = (x * x).sum()
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    data = numpy.random.rand(3, 4).astype('float32')
    utt.assert_allclose(f(data), (data * data).sum())

    o = (x * x).sum(axis=1)
    f = theano.function([x], o, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert topo[1].op.pre_scalar_op == theano.scalar.sqr
    utt.assert_allclose(f(data), (data * data).sum(axis=1))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise_fusion():
    """ Test the the GpuElemwise fusion work correctly"""
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                                dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    for i, node in enumerate(topo):
        print(i, node, file=sys.stdout)
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_softmax(self):
        x = T.fmatrix('x')
        z = T.nnet.softmax_op

        def check_types(graph, graph_gpu):
            self._check_types(
                graph,
                graph_gpu,
                type(z),
                self.gpu_op
            )

        f, f_gpu = self._test_softmax(
            x,
            x,
            z,
            z,
            self._cmp,
            check_types
        )

        if self.do_big:
            self._cmp(2 << 15, 5, f, f_gpu)
        if self.do_0:
            self._cmp(0, 10, f, f_gpu)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_Strides2D(self):
        x = T.fmatrix('x')

        for axis in [0, 1, None, -1, -2]:
            a = np.random.random((42, 30)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis),
                                              mode=self.mode)

            slicings = [slice(None, None, None),    # Normal strides
                        slice(None, None, 2),       # Stepped strides
                        slice(None, None, -1),      # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis),
                                    mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis), f(a))
                utt.assert_allclose(np.cumsum(a[slicing], axis=axis),
                                    cumsum_function(a[slicing]))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_deepcopy():
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))

    # We force the c code to check that we generate c code
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))

    # We force the python linker as the default code should work for this op
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_gpu_out_multiple_clients(self):
        # Test that when the output of gpu_from_host is used by more
        # than one Op, the gradient still works.
        # A problem used to be that GpuFromHost.grad expected the output
        # gradient to be on GPU, but the summation of the different
        # incoming gradients was done on CPU.

        x = tensor.fmatrix('x')
        z = cuda.gpu_from_host(x)

        n1 = tensor.nnet.sigmoid(z)
        n2 = tensor.dot(z, z.T)

        s1 = n1.sum()
        s2 = n2.sum()

        c = s1 + s2

        dc_dx = theano.grad(c, x)
        if self.verbose:
            theano.printing.debugprint(c, print_type=True)
            theano.printing.debugprint(dc_dx, print_type=True)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise0():

    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(4, 4),
                                               dtype='float32'), 'a')

    b = tensor.fmatrix()

    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)

    # check that we work inplace.
    assert (list(
        f.maker.fgraph.toposort()[1].op.destroy_map.items()) == [
            (0, [0])])

    a0 = a.get_value() * 1.0
    f(numpy.ones((4, 4), dtype='float32'))

    assert numpy.all(a0 + 1.0 == a.get_value())
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the GPU.
    """

    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:

        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Project: Buffe    Author: bentzinir    | Project source | File source
def my_2pir_jump(x):
    x_out = np.zeros_like(x)
    for idx,val in enumerate(x):
        if val[0] < 0 :
            x_out[idx] = val[0] + 62.831853 # 2*pi*r
        else:
            x_out[idx] = val[0]
    return x_out

# x = tt.fmatrix()
# # y = tt.fmatrix()
# #
# f = function([x], my_2pir_jump(x))
# inp1 = 160 * np.random.randn(2,1).astype('float32')
# # inp2 = np.random.rand(2,2).astype('float32')
# out = f(inp1)
# print 'inp1:'
# print inp1
# # print 'inp2:'
# # print inp2
# print 'out:'
# print out
Project: Buffe    Author: bentzinir    | Project source | File source
def __init__(self, game_params, arch_params, solver_params, trained_model, sn_dir):

        params=None

        if trained_model:
            params = common.load_params(trained_model)

        self.lr_func = create_learning_rate_func(solver_params)
        self.v_h_0 = tt.fvector('v_h_0')
        self.x_h_0 = tt.fvector('x_h_0')
        self.v_t_0 = tt.fmatrix('v_t_0')
        self.x_t_0 = tt.fmatrix('x_t_0')
        self.a_t_0 = tt.fmatrix('a_t_0')
        self.is_aggressive = tt.fmatrix('is_aggressive')
        self.lr_ = tt.fscalar('lr')
        self.n_steps_ = tt.iscalar('n_steps')
        self.sn_dir = sn_dir
        self.game_params = game_params
        self.arch_params = arch_params
        self.solver_params = solver_params

        self.model = CONTROLLER(self.v_h_0, self.x_h_0, self.v_t_0, self.x_t_0, self.a_t_0, self.is_aggressive, self.lr_, self.n_steps_, self.game_params, self.arch_params, self.solver_params, params)
Project: crayimage    Author: yandexdataschool    | Project source | File source
def define(self, n_channels, n_units = 1, dtype='uint8'):
    self.input = T.matrix(name='global pool', dtype=dtype)
    float_input = self.input.astype('float32')
    normed_input = T.log(float_input + 1)

    self.labels = T.fmatrix(name='labels')

    input_l = layers.InputLayer(shape=(None, ) + (n_channels, ), input_var=normed_input)

    dense = layers.DenseLayer(
      input_l,
      num_units=n_units,
      nonlinearity=nonlinearities.sigmoid
    )

    self.net = layers.DenseLayer(
      dense,
      num_units=1,
      nonlinearity=nonlinearities.sigmoid
    )
Project: crayimage    Author: yandexdataschool    | Project source | File source
def define(self, n_units = 1):
    self.sample_weights = T.fvector(name='weights')
    self.labels = T.fvector(name='labels')
    self.input = T.fmatrix(name='input')

    input_layer = layers.InputLayer(shape=(None , 1), input_var=self.input)

    dense1 = layers.DenseLayer(
      input_layer,
      num_units=n_units,
      nonlinearity=nonlinearities.sigmoid
    )

    self.net = layers.DenseLayer(
      dense1,
      num_units=1,
      nonlinearity=nonlinearities.sigmoid
    )
Project: State-Frequency-Memory-Recurrent-Neural-Networks    Author: hhkunming    | Project source | File source
def eval_batch(batch_fea, nframe, tparams, omega, options):
    x = T.fmatrix('x')
    tp = theano.function([x], SFM(tparams, x, omega, options), name='feat', allow_input_downcast=True)

    n = nframe.shape[0]
    log_like=[None]*n
    for i in range(n):
        x_start = np.sum(nframe[:i])
        x_end = x_start+nframe[i]
        x_input = batch_fea[x_start:x_end]

        outs = tp(x_input)
        log_like_fr = [None]*nframe[i]
        for j in range(nframe[i]):
            log_like_fr[j] = np.dot(x_input[j], np.log(softmax(np.dot(tparams['W'].get_value(), outs[j])+tparams['b'].get_value())))
        log_like[i] = np.mean(log_like_fr)
    return log_like
Project: SERT    Author: cvangysel    | Project source | File source
def _finalize(self, loss_fn,
                  weight_layers, projection_layers,
                  eval_layer, predict_layer=None,
                  additional_params=[]):
        assert hasattr(weight_layers, '__iter__')
        assert hasattr(projection_layers, '__iter__')

        output_layer = eval_layer

        # Be flexible in terms of batch input formats.
        x_batch = T.matrix('input_indices', dtype=self.input_dtype)

        # Do not be flexible w.r.t. output type.
        y_batch = (
            T.ivector('y') if self.training_set[1].ndim == 1
            else T.fmatrix('y'))

        # Instance weights for training.
        w_batch = T.fvector('weights')

        objective = WeightedObjective(
            output_layer, loss_function=loss_fn)

        loss_train = objective.get_loss(
            weights=w_batch,
            input=x_batch,
            target=y_batch,
            deterministic=False)

        loss_eval = objective.get_loss(
            input=x_batch, target=y_batch, deterministic=True)

        loss_train += self._regularization(
            weight_layers, projection_layers)

        self._create_functions(output_layer, loss_train, loss_eval,
                               dict(x_batch=x_batch,
                                    y_batch=y_batch,
                                    w_batch=w_batch),
                               predict_layer=predict_layer,
                               additional_params=additional_params)
Project: sesame-paste-noodle    Author: aissehust    | Project source | File source
def test_predictBatchSize():
    """
    Test that batch size works for the predictor.
    """
    n = N.Network()
    n.batchSize = 2

    n.inputSizeChecker = [1, 1]

    x = T.fmatrix()
    y = T.switch(T.gt(x, 0), 1, 0)
    f = theano.function([x], y, allow_input_downcast=True)
    n.predicter = f

    tx = np.array([[-0.27540332], [-0.76737626], [ 0.84122449], [-1.96092991], [-0.44198351],
                   [ 0.79166672], [ 0.87340424], [ 0.04555511], [-2.11510706], [-0.10966502],
                   [ 0.54762297], [-1.56990211], [-0.61545427], [ 1.11211698], [-0.66220848],
                   [ 0.11964702], [-2.15263133], [-1.8672312 ], [ 0.22093941], [-0.46957548]])
    ty = np.array([[0], [0], [1], [0], [0],
                   [1], [1], [1], [0], [0],
                   [1], [0], [0], [1], [0],
                   [1], [0], [0], [1], [0]])
    tlen = 20

    assert (ty == n.predict(tx)).all()
    assert (ty[:(tlen-1), :] == n.predict(tx[:(tlen-1), :])).all()
Project: peters-stuff    Author: peterneher    | Project source | File source
def setup(self, bottom, top):
        # check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs to compute the dice. the result of the softmax and the ground truth.")

        try :
            params = eval(self.param_str)
            if "param1" in params :
                self.ignore_label = int(params["param1"])
        except :
            pass

        if len(bottom[0].data.shape)==4 :
            self.prediction = T.fmatrix()
            self.ground_truth = T.fmatrix()
        elif len(bottom[0].data.shape)==5 :
            self.prediction = T.ftensor3()
            self.ground_truth = T.ftensor3()
        else:
            raise Exception('DiceIndexLayer only supports 2D or 3D data at the moment.')

        intersection = T.sum(self.prediction * self.ground_truth)
        denominator = T.sum(self.prediction) + T.sum(self.ground_truth)
        dice = 1 - 2 * intersection / (denominator + 0.00001)

        self.f = theano.function([self.prediction, self.ground_truth], dice)

        grad = T.grad(dice, wrt=self.prediction)
        self.g = theano.function([self.prediction, self.ground_truth], grad)
Project: NCRF-AE    Author: cosmozhang    | Project source | File source
def __init__(self, rng, embeddings, char_embeddings, hiddensize, char_hiddensize, embedding_dim, char_embedding_dim, window_size, num_tags, dic_size, dropout_rate = 0.7):
        self.rng = rng
        self.inputX = T.imatrix('inputX') # a sentence, shape (T * window_size)
        self.inputX_chars = T.itensor3('inputX_chars') # a sentence, shape (T * max numbe of chars in a word)
        self.inputY = T.ivector('inputY') # tags of a sentence
        self.is_train = T.iscalar('is_train')

        self.new_theta = T.fmatrix('new_theta')

        self.dropout_rate = dropout_rate
        self.nhidden = hiddensize
        self.char_nhidden = char_hiddensize # for now set the number of hidden units the same
        self.embedding_dim = embedding_dim
        self.char_embedding_dim = char_embedding_dim
        self.window_size = window_size
        self.n_classes = num_tags
        self.dic_size = dic_size

        # test values used when compiling
        self.inputX.tag.test_value = np.ones((10, window_size)).astype(np.int32)
        self.inputX_chars.tag.test_value = np.ones((10, window_size, 8)).astype(np.int32)
        self.inputY.tag.test_value = np.ones(10).astype(np.int32)

        self.Embeddings = theano.shared(value = embeddings, name = "Embeddings", borrow = True)
        self.Char_Embeddings = theano.shared(value = char_embeddings, name = "Char_Embeddings", borrow = True)

        # word embeddings
        self.inputW = self.Embeddings[self.inputX]

        # char embeddings
        self.inputC = self.Char_Embeddings[self.inputX_chars].dimshuffle([2, 0, 1, 3])

        self.params = [self.Embeddings, self.Char_Embeddings]