Python theano module: theano.gof code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.gof.
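
Most of the snippets below build on the same gof primitives: gof.Op, gof.Apply, the linkers, and the optimizer registry. As a framing example, here is a minimal custom Op in the style of the official Theano tutorial (a sketch; DoubleOp is illustrative and does not come from the projects below):

import theano
import theano.tensor as T
from theano import gof

class DoubleOp(gof.Op):
    __props__ = ()

    def make_node(self, x):
        # Wrap the input and declare one output of the same type.
        x = T.as_tensor_variable(x)
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inputs, output_storage):
        # Numeric implementation: double the input.
        (x,) = inputs
        (z,) = output_storage
        z[0] = x * 2

x = T.matrix('x')
f = theano.function([x], DoubleOp()(x))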

Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def register_opt2(tracks, *tags, **kwargs):
    '''
    Decorator for the new GraphToGPU optimizer.
    Takes an extra parameter (the Op) compared to the register_opt decorator.

    Parameters
    ----------
    tracks : list of Op classes, Op instances, or None
        The Node's Op to which the optimization is applied.

    tags : str
        The optimization tag under which the optimizer will be registered.

    '''
    def f(local_opt):
        name = (kwargs and kwargs.pop('name')) or local_opt.__name__
        opt = theano.gof.local_optimizer(tracks)(local_opt)
        gpu_optimizer2.register(name, opt, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return f
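
For comparison, the plain theano.gof.local_optimizer decorator wrapped above can also be used directly. A minimal sketch (the no-op optimizer body is illustrative):

from theano import gof, tensor

@gof.local_optimizer([tensor.Alloc])
def local_noop_on_alloc(node):
    # Returning False declines to rewrite the node.
    return False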
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, a, val, offset):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        offset = tensor.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError('%s: first parameter must have exactly'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        elif offset.ndim != 0:
            raise TypeError('%s: third parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same'
                            ' as the first\'s' % self.__class__.__name__)
        elif offset.dtype[:3] != 'int':
            raise TypeError('%s: type of third parameter must be an integer;'
                            ' use theano.tensor.cast(input, \'int32/int64\')'
                            % self.__class__.__name__)

        return gof.Apply(self, [a, val, offset], [a.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_canonicalize_nan(self):
        """
        Regression test for bug in canonicalization of NaN values.

        This bug caused an infinite loop which was caught by the equilibrium
        optimizer, resulting in an error log message.
        """
        sio = StringIO()
        handler = logging.StreamHandler(sio)
        handler.setLevel(logging.ERROR)
        logging.getLogger('theano.gof.opt').addHandler(handler)
        try:
            x = vector()
            f = theano.function([x], x + numpy.nan)
        finally:
            logging.getLogger('theano.gof.opt').removeHandler(handler)
        # Ideally this test would only catch the maxed out equilibrium
        # optimizer error message, but to be safe in case this message
        # is modified in the future, we assert that there is no error
        # at all.
        assert not sio.getvalue()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_local_reshape_dimshuffle():

    reshape_dimshuffle = out2in(local_dimshuffle_alloc)

    x = tensor.vector('x')

    out = tensor.alloc(x, 3, 2).dimshuffle('x', 'x', 0, 1)

    g = FunctionGraph([x], [out])
    reshape_dimshuffle(g)

    l = theano.gof.PerformLinker()
    l.accept(g)
    f = l.make_function()

    assert f([3, 4]).ndim == 4

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x, axis, splits):
        """WRITEME"""
        x = as_tensor_variable(x)
        axis = as_tensor_variable(axis)
        splits = as_tensor_variable(splits)

        if splits.type not in int_vector_types:
            raise TypeError('splits must have type tensor.lvector',
                            splits.type)
        if axis.type not in int_types:
            raise TypeError('axis must have type lscalar', axis.type)

#         # The following lines are necessary if we allow splits of zero
#         if isinstance(axis, gof.Constant):
#             x = unbroadcast(x, int(axis.data))
#         else:
#             x = unbroadcast(x, *range(x.type.ndim))

        inputs = [x, axis, splits]
        outputs = [x.type() for i in xrange(self.len_splits)]

        return Apply(self, inputs, outputs)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x):
        t_x = as_tensor_variable(x)
        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))

        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)

        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
                                              broadcastable)])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_free(self):
        """
        Test the free() function.
        """
        x = T.vector('x')
        func = function([x], x + 1)
        func.fn.allow_gc = False
        func([1])

        check_list = []
        for key, val in iteritems(func.fn.storage_map):
            if not isinstance(key, theano.gof.Constant):
                check_list.append(val)
        assert any([val[0] for val in check_list])

        func.free()

        for key, val in iteritems(func.fn.storage_map):
            if not isinstance(key, theano.gof.Constant):
                assert (val[0] is None)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def accept(self, fgraph, no_recycling=None, profile=None):
        """

        Parameters
        ----------
        fgraph : gof.FunctionGraph
            The fgraph which we will link.
        no_recycling : a list of Variables that belong to fgraph.
            If a Variable is in no_recycling, WrapLinker will clear
            the output storage associated to it (for each linker in linkers)
            during the computation to avoid reusing it.

        """
        if no_recycling is None:
            no_recycling = []
        if self.fgraph is not None and self.fgraph is not fgraph:
            return type(self)(self.linkers, self.wrapper).accept(fgraph,
                                                                 no_recycling)

        self.fgraph = fgraph
        self.no_recycling = no_recycling
        self.linkers = [linker.accept(fgraph, no_recycling)
                        for linker in self.linkers]
        return self
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_vm_gc():
    """This already caused a bug in the trunk of Theano.

    The bug was introduced in the trunk on July 5th, 2012 and fixed on
    July 30th.

    """
    x = theano.tensor.vector()
    p = RunOnce()(x)
    mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
    f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2],
                        mode=mode)
    f([1, 2, 3])

    p = RunOnce()(x)
    pp = p + p
    f = theano.function([x], [pp + pp],
                        mode=mode)
    f([1, 2, 3])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_sort_schedule_fn():
    import theano
    from theano.gof.sched import sort_schedule_fn, make_depends
    x = theano.tensor.matrix('x')
    y = theano.tensor.dot(x[:5] * 2, x.T + 1).T

    def str_cmp(a, b):
        return cmp(str(a), str(b))  # lexicographical sort

    linker = theano.OpWiseCLinker(schedule=sort_schedule_fn(str_cmp))
    mode = theano.Mode(linker=linker)
    f = theano.function((x,), (y,), mode=mode)

    nodes = f.maker.linker.make_all()[-1]
    depends = make_depends()
    for a, b in zip(nodes[:-1], nodes[1:]):
        if not depends((b, a)):
            assert str(a) < str(b)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_not(self):
        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == ~a, (a,))

        x, y, z = ints('xyz')
        fn = gof.DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
        for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
            self.assertTrue(fn(a, b) == ~a, (a,))


# This class does not inherit from unittest.TestCase, because that would
# interfere with the "yield" mechanism that automatically generates tests; see
# http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
# Therefore, it needs to be named "test_..." or "Test_..." so nose can pick
# it up by name; otherwise the tests would not be executed.
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def as_scalar(x, name=None):
    from ..tensor import TensorType, scalar_from_tensor
    if isinstance(x, gof.Apply):
        if len(x.outputs) != 1:
            raise ValueError("It is ambiguous which output of a multi-output"
                             " Op has to be fetched.", x)
        else:
            x = x.outputs[0]
    if isinstance(x, Variable):
        if isinstance(x.type, Scalar):
            return x
        elif isinstance(x.type, TensorType) and x.ndim == 0:
            return scalar_from_tensor(x)
        else:
            raise TypeError("Variable type field must be a Scalar.", x, x.type)
    try:
        return constant(x)
    except TypeError:
        raise TypeError("Cannot convert %s to Scalar" % x, type(x))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def c_code_contiguous(self, node, name, inputs, outputs, sub):
        (x,) = inputs
        (z,) = outputs
        if (not theano.config.lib.amdlibm or
                # We compare the dtype AND the broadcast flag
                # because this function does not broadcast
                node.inputs[0].type != node.outputs[0].type):
            raise theano.gof.utils.MethodNotDefined()

        dtype = node.inputs[0].type.dtype_specs()[1]
        fct_call = self.c_code_contiguous_raw(dtype, 'n', 'x', 'z')
        return """
{
        npy_intp n = PyArray_SIZE(%(z)s);
        %(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
        %(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
        %(fct_call)s;
}
        """ % locals()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def c_support_code_apply(self, node, name):
        self.init_c_code()
        rval = []
        for subnode, subnodename in zip(self.fgraph.toposort(), self.nodenames):
            try:
                subnode_support_code = subnode.op.c_support_code_apply(
                    subnode,
                    subnodename % dict(nodename=name))
                if subnode_support_code:
                    rval.append(subnode_support_code)
            except gof.utils.MethodNotDefined:
                pass
        # there should be no need to remove duplicate code blocks because
        # each block should have been specialized for the given nodename.
        # Any block that isn't specialized should be returned via
        # c_support_code instead of c_support_code_apply.
        return "\n".join(rval)
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def _is_symbolic(v):
    r"""Return `True` if any of the arguments are symbolic.
    See:
        https://github.com/Theano/Theano/wiki/Cookbook
    """
    symbolic = False
    v = list(v)
    for _container, _iter in [(v, xrange(len(v)))]:
        for _k in _iter:
            _v = _container[_k]
            if isinstance(_v, theano.gof.Variable):
                symbolic = True
    return symbolic
Project: mimicry.ai    Author: fizerkhan    | Project source | File source
def _is_symbolic(v):
    r"""Return `True` if any of the arguments are symbolic.
    See:
        https://github.com/Theano/Theano/wiki/Cookbook
    """
    symbolic = False
    v = list(v)
    for _container, _iter in [(v, xrange(len(v)))]:
        for _k in _iter:
            _v = _container[_k]
            if isinstance(_v, theano.gof.Variable):
                symbolic = True
    return symbolic
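
A quick sketch of calling the helper defined above:

import theano.tensor as T

print(_is_symbolic([1, 2.5, 'label']))    # False: no graph variables
print(_is_symbolic([1, T.vector('v')]))   # True: contains a gof.Variable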
Project: kfac-theano    Author: r00tman    | Project source | File source
def make_node(self, x):
        x = T.as_tensor_variable(x)
        assert x.ndim == 2
        o = T.scalar(dtype=x.dtype)
        return theano.gof.Apply(self, [x], [o])
Project: kfac-theano    Author: r00tman    | Project source | File source
def make_node(self, A, b):
        A = T.as_tensor_variable(A)
        b = T.as_tensor_variable(b)
        assert A.ndim == 2
        assert b.ndim in [1, 2]
        otype = T.tensor(
            broadcastable=b.broadcastable,
            dtype=(A * b).dtype)
        return theano.gof.Apply(self, [A, b], [otype])
Project: kfac-theano    Author: r00tman    | Project source | File source
def make_node(self, A, B):
        A = T.as_tensor_variable(A)
        B = T.as_tensor_variable(B)
        assert A.ndim in [2, 3]
        assert B.ndim in [2, 3]
        assert A.ndim == B.ndim
        otype = T.tensor(
            broadcastable=B.broadcastable,
            dtype=(A * B).dtype)
        return theano.gof.Apply(self, [A, B], [otype])
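
These make_node methods only build the symbolic node; a matching perform method must fill in the value promised by the output type. A minimal sketch for the first kfac-theano Op above, which declares a 0-d output (the trace computation is an illustrative assumption, not the project's actual kernel):

import numpy as np

def perform(self, node, inputs, output_storage):
    (x,) = inputs
    (o,) = output_storage
    # Produce the 0-d output declared in make_node; the trace is
    # an arbitrary stand-in for the real computation.
    o[0] = np.asarray(np.trace(x), dtype=x.dtype)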
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, props=None, **more_props):
        if props is None:
            props = {}
        elif isinstance(props, gof.utils.scratchpad):
            self.__update__(props)
        else:
            self.__dict__.update(props)
        self.__dict__.update(more_props)
        # A dict from the object to print to its string representation.
        # If the graph is a DAG rather than a tree, this lets each node
        # be parsed only once, although it may still be printed many times.
        self.memo = {}
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def assign(self, condition, printer):
        # condition can be a class or an instance of an Op.
        if isinstance(condition, (gof.Op, type)):
            self.printers_dict[condition] = printer
            return
        self.printers.insert(0, (condition, printer))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, M):
        M = tensor.as_tensor_variable(M)
        if M.ndim != 0:
            raise TypeError('%s only works on scalar input'
                            % self.__class__.__name__)
        elif (not M.dtype.startswith('int') and
              not M.dtype.startswith('uint')):
            # dtype is a theano attribute here
            raise TypeError('%s only works on integer input'
                            % self.__class__.__name__)
        return gof.Apply(self, [M], [tensor.dvector()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, a, val):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        if a.ndim < 2:
            raise TypeError('%s: first parameter must have at least'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same as'
                            ' the first\'s' % self.__class__.__name__)
        return gof.Apply(self, [a, val], [a.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, type, owner=None, index=None, name=None):
        super(TensorVariable, self).__init__(type, owner=owner,
                                             index=index, name=name)
        if (config.warn_float64 != 'ignore' and type.dtype == 'float64'):
            msg = ('You are creating a TensorVariable '
                   'with float64 dtype. You requested an action via '
                   'the Theano flag warn_float64={ignore,warn,raise,pdb}.')
            if config.warn_float64 == "warn":
                # Get the user stack. We don't want functions inside the
                # tensor and gof directories to be shown to the user.
                x = tb.extract_stack()
                nb_rm = 0
                while x:
                    file_path = x[-1][0]
                    rm = False
                    for p in ["theano/tensor/", "theano\\tensor\\",
                              "theano/gof/", "theano\\tensor\\"]:
                        if p in file_path:
                            x = x[:-1]
                            nb_rm += 1
                            rm = True
                            break
                    if not rm:
                        break
                warnings.warn(msg, stacklevel=1 + nb_rm)
            elif config.warn_float64 == "raise":
                raise Exception(msg)
            elif config.warn_float64 == 'pdb':
                import pdb
                pdb.set_trace()
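
The branches above are driven by the warn_float64 Theano flag, which can also be set at runtime; for example:

import theano
import theano.tensor as T

theano.config.warn_float64 = 'warn'   # one of: ignore, warn, raise, pdb
x = T.dvector('x')                    # float64, so this takes the warning path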
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def speed_fusion(self, shared_fn=shared, gpu=False, s=None):
        """
        param type s: a slice object
        param s: a slice to apply to the case to execute. If None, exec all case.
        """

        # shp = (3000, 3000)
        shp = (1000, 1000)
        nb_repeat = 50
#        linker=gof.CLinker
#        linker=gof.OpWiseCLinker

        mode1 = copy.copy(compile.get_default_mode())
        mode1._optimizer = mode1._optimizer.including('local_elemwise_fusion')
        # TODO: CLinker is much faster... but uses too much memory.
        # Possible cause: intermediate values are not deleted when we
        # don't keep the fct.
        # More plausible cause: we keep a link to the output data?
        # Follow-up: CLinker does the same... so the second cause?
        mode2 = copy.copy(compile.get_default_mode())
        mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion')
        print("test with linker", str(mode1.linker))
        times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat=nb_repeat,
                         assert_len_topo=False, slice=s)
        print("times1 with local_elemwise_fusion")
        print(times1, times1.min(), times1.max(), times1.sum())
        print("times2 without local_elemwise_fusion")
        print(times2, times2.min(), times2.max(), times2.sum())
        d = times2 / times1

        print("times2/times1")
        print(d)
        print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \
            "mean", d.mean(), "std", d.std())
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def get_idx_list(inputs, idx_list, get_count=False):
    """
    Given a list of inputs to the subtensor and its idx_list reorders
    the inputs according to the idx list to get the right values.

    If get_counts=True, instead returns the number of inputs consumed
    during this process.

    """

    # The number of indices
    n = len(inputs) - 1

    # The subtensor (or idx_list) does not depend on the inputs.
    if n == 0:
        return tuple(idx_list)
    indices = list(reversed(list(inputs[1:])))

    # General case
    def convert(entry):
        if isinstance(entry, gof.Type):
            return indices.pop()
        elif isinstance(entry, slice):
            return slice(convert(entry.start),
                         convert(entry.stop),
                         convert(entry.step))
        else:
            return entry
    cdata = tuple(map(convert, idx_list))
    if get_count:
        return n - len(indices)
    else:
        return cdata
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def c_support_code(self):
        from theano.gof.cutils import compile_cutils_code
        return compile_cutils_code()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def as_index_variable(idx):
    if idx is None:
        return NoneConst.clone()
    if isinstance(idx, slice):
        return make_slice(idx)
    if isinstance(idx, gof.Variable) and isinstance(idx.type, SliceType):
        return idx
    if isinstance(idx, gof.Variable) and isinstance(idx.type, NoneTypeT):
        return idx
    idx = theano.tensor.as_tensor_variable(idx)
    if idx.type.dtype[:3] not in ('int', 'uin'):
        raise TypeError('index must be integers')
    return idx
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def adv_index_broadcastable_pattern(a, idx):
    """
    This function is only used to determine the broadcast pattern for
    AdvancedSubtensor output variable.

    For this, we make a fake ndarray and a fake idx and call use ask numpy
    the output. From this, we find the output broadcast pattern.

    """

    def replace_slice(v):
        if isinstance(v, gof.Apply):
            if len(v.outputs) != 1:
                raise ValueError(
                    "It is ambiguous which output of a multi-output Op has"
                    " to be fetched.", v)
            else:
                v = v.outputs[0]

        if NoneConst.equals(v):
            return None
        if isinstance(v.type, SliceType):
            return slice(None, None)

        return numpy.zeros((2,) * v.ndim, int)

    newidx = tuple(map(replace_slice, idx))

    # 2 - True = 1; 2 - False = 2
    fakeshape = [2 - bc for bc in a.broadcastable]
    retshape = numpy.empty(fakeshape)[newidx].shape
    return tuple([dim == 1 for dim in retshape])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, a):
        a = as_tensor_variable(a)
        if a.ndim == 0:
            raise ValueError('Nonzero only supports non-scalar arrays.')
        output = [TensorType(dtype='int64', broadcastable=(False, False))()]
        return gof.Apply(self, [a], output)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, N, M, k):
        N = as_tensor_variable(N)
        M = as_tensor_variable(M)
        k = as_tensor_variable(k)
        return gof.Apply(
            self,
            [N, M, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, n, m, k):
        n = as_tensor_variable(n)
        m = as_tensor_variable(m)
        k = as_tensor_variable(k)
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        return gof.Apply(
            self,
            [n, m, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, value, *shape):
        v = as_tensor_variable(value)
        sh, bcast = alloc_validate_shape(shape)
        if v.ndim > len(sh):
            raise TypeError("The Alloc value to use has more dimensions"
                            " than the specified dimensions",
                            v.ndim, len(sh))
        otype = TensorType(dtype=v.dtype, broadcastable=bcast)
        return gof.Apply(self, [v] + sh, [otype()])
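
In user code this Op is usually reached through theano.tensor.alloc; a short sketch:

import theano.tensor as T

v = T.vector('v')
a = T.alloc(v, 3, 5)    # broadcast the 1-d v into a (3, 5) tensor
print(a.broadcastable)  # (False, False): both constant dims are > 1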
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x, shp):
        x = as_tensor_variable(x)
        shp_orig = shp
        shp = as_tensor_variable(shp, ndim=1)
        if not (shp.dtype.startswith('int') or
                (isinstance(shp, TensorConstant) and shp.data.size == 0)):
            # It raises an error if shp is not of integer type,
            # except when shp is constant and empty
            # (in this case, shp.dtype does not matter anymore).
            raise TypeError("Shape must be integers", shp, shp.dtype)
        assert shp.ndim == 1
        if isinstance(shp, TensorConstant):
            bcast = [s == 1 for s in shp.data]
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
        else:
            bcasts = [False] * self.ndim
            shp_list = shp_orig
            if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
                shp_list = [shp_orig]
            for index in xrange(self.ndim):
                y = shp_list[index]
                y = as_tensor_variable(y)
                # Try to see if we can infer that y has a constant value of 1.
                # If so, that dimension should be broadcastable.
                try:
                    bcasts[index] = (
                        hasattr(y, 'get_scalar_constant_value') and
                        y.get_scalar_constant_value() == 1)
                except NotScalarConstantError:
                    pass
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
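
The constant-shape branch above is what lets Theano mark reshaped dimensions of size 1 as broadcastable; for example:

import theano.tensor as T

x = T.matrix('x')
y = T.reshape(x, (4, 1))
print(y.broadcastable)   # (False, True): the constant 1 is detected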
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x, reps):
        warnings.warn((
            "Tile op is deprecated, use tile function instead."), stacklevel=3)
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] *
                                                  self.ndim)])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, value, *conds):
        if not isinstance(value, Variable):
            value = T.as_tensor_variable(value)
        cond = [T.as_tensor_variable(c) for c in conds]
        assert numpy.all([c.type.ndim == 0 for c in cond])
        return gof.Apply(self, [value] + cond, [value.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def local_reshape_chain(op):
    @gof.local_optimizer([op])
    def f(node):
        """
        Reshape(Reshape(shape1),shape2) -> Reshape(shape2)

        """
        if not opt.check_chain(node, op, op):
            return False

        # TODO: this can permit a failing program to run by eliminating
        #       the lower reshape
        rval = node.op(node.inputs[0].owner.inputs[0], node.inputs[1])
        # It might happen that the desired output of this node has a
        # broadcastable pattern that does not match that of 'rval'. This is
        # when originally, we were able to figure out that one of the
        # dimensions of the reshape is one, but some other transformation
        # replaced the shape by one for which this cannot be guessed.
        # We should try to figure out why we lost the information about this
        # constant value... but in the meantime, better not apply this
        # optimization.
        if rval.broadcastable == node.outputs[0].broadcastable:
            return [rval]
        else:
            return False

    return f
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def c_compile_args(self):
        ret = []

        if self.use_blas():
            ret = blas.ldflags(libs=False, flags=True)
        if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
                self.kshp == (1, 1)):
            ret += ['-O2']
        # Add the -fopenmp flags
        ret += super(ConvOp, self).c_compile_args()

        return ret
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, **kwargs):
        gof.Op.__init__(self, **kwargs)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, path):
        if isinstance(path, str):
            path = Constant(Generic(), path)
        return gof.Apply(self, [path], [tensor(self.dtype,
                                        broadcastable=self.broadcastable)])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self):
        return gof.Apply(self, [], [theano.Variable(Generic()),
                                    tensor(self.dtype,
                                           broadcastable=self.broadcastable)])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, data):
        return gof.Apply(self, [data],
                               [theano.Variable(Generic()), data.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, request, data):
        return gof.Apply(self, [request, data],
                               [theano.Variable(Generic())])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def validate(self, fgraph):
        if not hasattr(fgraph, 'destroyers'):
            return True
        for r in self.protected + list(fgraph.outputs):
            if fgraph.destroyers(r):
                raise gof.InconsistencyError("Trying to destroy a protected"
                                             " Variable.", r)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def free(self):
        """
        When allow_gc = False, clear the Variables in storage_map.
        """
        # getattr defaults to True, so nothing is cleared unless the
        # compiled fn explicitly has allow_gc set to False.
        if not getattr(self.fn, 'allow_gc', True):
            for key in self.fn.storage_map:
                if not isinstance(key, theano.gof.Constant):
                    self.fn.storage_map[key][0] = None

            for node in self.nodes_with_inner_function:
                ops_with_inner_function[node.op].free()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def wrap_out(output):
        if isinstance(output, SymbolicOutput):
            return output
        elif isinstance(output, gof.Variable):
            return SymbolicOutput(output)
        else:
            raise TypeError("Unknown output type: %s (%s)", type(output),
                            output)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def _check_unused_inputs(self, inputs, outputs, on_unused_input):
        if on_unused_input is None:
            on_unused_input = theano.config.on_unused_input

        if on_unused_input == 'ignore':
            return

        # There should be two categories of variables in inputs:
        #  - variables that have to be provided (used_inputs)
        #  - shared variables that will be updated
        used_inputs = gof.graph.ancestors(
            ([o.variable for o in outputs] +
             [i.update for i in inputs if getattr(i, 'update', False)]),
            blockers=[i.variable for i in inputs])

        msg = ("theano.function was asked to create a function computing "
               "outputs given certain inputs, but the provided input "
               "variable at index %i is not part of the computational graph "
               "needed to compute the outputs: %s.\n%s")
        warn_msg = ("To make this warning into an error, you can pass the "
                    "parameter on_unused_input='raise' to theano.function. "
                    "To disable it completely, use on_unused_input='ignore'.")
        err_msg = ("To make this error into a warning, you can pass the "
                   "parameter on_unused_input='warn' to theano.function. "
                   "To disable it completely, use on_unused_input='ignore'.")

        for i in inputs:
            if ((i.variable not in used_inputs) and (i.update is None)):
                if on_unused_input == 'warn':
                    warnings.warn(msg % (inputs.index(i), i.variable,
                                         warn_msg), stacklevel=6)
                elif on_unused_input == 'raise':
                    raise UnusedInputError(msg % (inputs.index(i),
                                                  i.variable, err_msg))
                else:
                    raise ValueError("Invalid value for keyword "
                                     "on_unused_input of theano.function: "
                                     "'%s'.\nValid values are 'raise', "
                                     "'warn', and 'ignore'." % on_unused_input)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def PatternOptimizer(p1, p2, ign=True):
    return gof.OpKeyOptimizer(gof.PatternSub(p1, p2), ignore_newtrees=ign)
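
A sketch of feeding this helper a pattern using Theano's tuple pattern DSL (the neg(neg(x)) -> x rewrite is an illustrative choice):

from theano import tensor

# Rewrite neg(neg(x)) into x wherever the pattern appears in a graph.
double_neg_opt = PatternOptimizer((tensor.neg, (tensor.neg, 'x')), 'x')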
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, maker, schedule=None):
        super(gof.LocalLinker, self).__init__()
        self.fgraph = None
        self.maker = maker
        if schedule:
            self.schedule = schedule
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __init__(self, inputs, outputs, **kwargs):
        if not isinstance(outputs, list):
            raise TypeError('outputs must be list', outputs)
        for i in inputs + outputs:
            if not isinstance(i, gof.Variable):
                raise TypeError(
                    'inputs and outputs must be Variable instances', i)
        if 'updates' in kwargs or 'givens' in kwargs:
            raise TypeError('updates and givens are not allowed in kwargs')

        # To correctly support shared variables, the inner fct should
        # not see them. Otherwise there is a problem with the gradient.
        self.shared_inputs = [var for var in gof.graph.inputs(outputs)
                              if isinstance(var, SharedVariable)]
        shared_vars = [var.type() for var in self.shared_inputs]
        new = rebuild_collect_shared(outputs, inputs=inputs + shared_vars,
                                     replace=dict(izip(self.shared_inputs,
                                                       shared_vars)),
                                     copy_inputs_over=False)
        (new_inputs, new_outputs,
         [clone_d, update_d, update_expr, shared_inputs]) = new
        assert len(new_inputs) == len(inputs) + len(self.shared_inputs)
        assert len(new_outputs) == len(outputs)
        assert not update_d
        assert not update_expr
        assert not shared_inputs

        self.new_inputs = new_inputs
        self.new_outputs = new_outputs
        self.inputs = inputs
        self.outputs = outputs
        self.kwargs = kwargs
        self.input_types = [input.type for input in inputs]
        self.output_types = [output.type for output in outputs]