Python theano module: Variable() example source code

We extracted the following 31 code examples from open-source Python projects to illustrate how to use theano.Variable().
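
As a quick orientation before the extracted snippets: every symbolic tensor built through theano.tensor is an instance of theano.Variable, which is exactly what the isinstance checks in the examples below rely on. A minimal sketch, assuming a working Theano installation:

import theano
import theano.tensor as T

x = T.dmatrix('x')                        # a symbolic TensorVariable
print(isinstance(x, theano.Variable))     # True: TensorVariable subclasses Variable
print(x.type)                             # TensorType(float64, matrix)

y = x * 2                                 # results of graph operations are Variables too
f = theano.function([x], y)
print(f([[1.0, 2.0], [3.0, 4.0]]))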

Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def get_flags(*types):
        def get_dtype(t):
            if isinstance(t, string_types):
                return numpy.dtype(t)
            elif isinstance(t, Type):
                return t.dtype
            elif isinstance(t, Variable):
                return t.type.dtype
            else:
                raise TypeError("can't get a dtype from %s" % (type(t),))
        dtypes = [get_dtype(t) for t in types]
        flags = dict(cluda=True)
        if any(d == numpy.float64 for d in dtypes):
            flags['have_double'] = True
        if any(d.itemsize < 4 for d in dtypes):
            flags['have_small'] = True
        if any(d.kind == 'c' for d in dtypes):
            flags['have_complex'] = True
        if any(d == numpy.float16 for d in dtypes):
            flags['have_half'] = True
        return flags
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def _is_sparse_variable(x):
    """

    Returns
    -------
    boolean
        True iff x is a L{SparseVariable} (and not a L{tensor.TensorType},
        for instance).

    """
    if not isinstance(x, gof.Variable):
        raise NotImplementedError("this function should only be called on "
                                  "*variables* (of type sparse.SparseType "
                                  "or tensor.TensorType, for instance), not ",
                                  x)
    return isinstance(x.type, SparseType)
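
A short usage sketch of the distinction this helper tests for, built with the public theano.sparse and theano.tensor factories rather than the private _is_sparse_variable itself:

import theano.tensor as T
import theano.sparse as sparse

s = sparse.csr_matrix(name='s', dtype='float64')   # a SparseVariable
d = T.dmatrix('d')                                 # a dense TensorVariable

print(isinstance(s.type, sparse.SparseType))       # True
print(isinstance(d.type, sparse.SparseType))       # False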
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, img, kern):
        # Make sure both inputs are Variables with the same Type
        if not isinstance(img, theano.Variable):
            img = as_tensor_variable(img)
        if not isinstance(kern, theano.Variable):
            kern = as_tensor_variable(kern)
        ktype = img.type.clone(dtype=kern.dtype,
                               broadcastable=kern.broadcastable)
        kern = ktype.filter_variable(kern)

        if img.type.ndim != 2 + self.convdim:
            raise TypeError('img must be %dD tensor' % (2 + self.convdim))
        if kern.type.ndim != 2 + self.convdim:
            raise TypeError('kern must be %dD tensor' % (2 + self.convdim))

        broadcastable = [img.broadcastable[0],
                         kern.broadcastable[0]] + ([False] * self.convdim)
        output = img.type.clone(broadcastable=broadcastable)()
        return Apply(self, [img, kern], [output])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, img, topgrad, shape):
        # Make sure both inputs are Variables with the same Type
        if not isinstance(img, theano.Variable):
            img = as_tensor_variable(img)
        if not isinstance(topgrad, theano.Variable):
            topgrad = as_tensor_variable(topgrad)
        gtype = img.type.clone(dtype=topgrad.dtype,
                               broadcastable=topgrad.broadcastable)
        topgrad = gtype.filter_variable(topgrad)

        if img.type.ndim != 2 + self.convdim:
            raise TypeError('img must be %dD tensor' % (2 + self.convdim))
        if topgrad.type.ndim != 2 + self.convdim:
            raise TypeError('topgrad must be %dD tensor' % (2 + self.convdim))

        shape = as_tensor_variable(shape)
        broadcastable = [topgrad.broadcastable[1],
                         img.broadcastable[1]] + ([False] * self.convdim)
        output = img.type.clone(broadcastable=broadcastable)()
        return Apply(self, [img, topgrad, shape], [output])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, kern, topgrad, shape):
        # Make sure both inputs are Variables with the same Type
        if not isinstance(kern, theano.Variable):
            kern = as_tensor_variable(kern)
        if not isinstance(topgrad, theano.Variable):
            topgrad = as_tensor_variable(topgrad)
        gtype = kern.type.clone(dtype=topgrad.dtype,
                                broadcastable=topgrad.broadcastable)
        topgrad = gtype.filter_variable(topgrad)

        if kern.type.ndim != 2 + self.convdim:
            raise TypeError('kern must be %dD tensor' % (2 + self.convdim))
        if topgrad.type.ndim != 2 + self.convdim:
            raise TypeError('topgrad must be %dD tensor' % (2 + self.convdim))

        shape = as_tensor_variable(shape)
        broadcastable = [topgrad.type.broadcastable[0],
                         kern.type.broadcastable[1]] + ([False] * self.convdim)
        output = kern.type.clone(broadcastable=broadcastable)()
        return Apply(self, [kern, topgrad, shape], [output])
Project: pl-cnn    Author: oval-group    | Project source | File source
def make_node(self, x, maxout, gz):
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        assert isinstance(x, Variable) and x.ndim == 4
        assert isinstance(maxout, Variable) and maxout.ndim == 4
        assert isinstance(gz, Variable) and gz.ndim == 4
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)

        return Apply(self, [x, maxout, gz], [x.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def ensure_dt(val, default, name, dtype):
    if dtype == 'float16':
        dtype = 'float32'
    if val is None:
        val = default.clone()
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == dtype:
        val = val.astype(dtype)
    return val
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def filter_variable(self, other, allow_convert=True):
        from theano.gpuarray.basic_ops import gpu_from_host

        if hasattr(other, '_as_GpuArrayVariable'):
            other = other._as_GpuArrayVariable(self.context_name)

        if not isinstance(other, Variable):
            other = self.Constant(type=self, data=other)

        if other.type == self:
            return other

        if not isinstance(other.type, tensor.TensorType):
            raise TypeError('Incompatible type', (self, other.type))
        if (other.type.dtype != self.dtype):
            raise TypeError('Incompatible dtype', (self.dtype,
                                                   other.type.dtype))
        if other.type.ndim != self.ndim:
            raise TypeError('Incompatible number of dimensions.'
                            ' Expected %d, got %d.' % (self.ndim, other.ndim))
        if other.type.broadcastable != self.broadcastable:
            if allow_convert:
                type2 = other.type.clone(broadcastable=self.broadcastable)
                other2 = type2.convert_variable(other)
            else:
                other2 = None
            if other2 is None:
                raise TypeError('Incompatible broadcastable dimensions.'
                                ' Expected %s, got %s.' %
                                (str(other.type.broadcastable),
                                 str(self.broadcastable)))
            other = other2

        return gpu_from_host(self.context_name)(other)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_variable(self, name=None):
        return self.Variable(self, name=name)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def c_code_cache_version(self):
        ver = pygpu.gpuarray.api_version()
        return (0, ver[0])

    # Variable, Constant, ... not declared
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, condition, *monitored_vars):

        # Ensure that condition is a theano tensor
        if not isinstance(condition, theano.Variable):
            condition = theano.tensor.as_tensor_variable(condition)

        # Validate that the condition is a scalar (else it is not obvious how
        # it should be evaluated)
        assert (condition.ndim == 0)

        # Because the user might be tempted to instantiate PdbBreakpoint only
        # once and apply it many times on different number of inputs, we must
        # create a new instance of the op here, define the instance attributes
        # (view_map and var_types) in that instance and then apply it on the
        # inputs.
        new_op = PdbBreakpoint(name=self.name)
        new_op.view_map = {}
        new_op.inp_types = []
        for i in range(len(monitored_vars)):
            # Every output i is a view of the input i+1 because of the input
            # condition.
            new_op.view_map[i] = [i + 1]
            new_op.inp_types.append(monitored_vars[i].type)

        # Build the Apply node
        inputs = [condition] + list(monitored_vars)
        outputs = [inp.type() for inp in monitored_vars]
        return Apply(op=new_op, inputs=inputs, outputs=outputs)
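
A hedged usage sketch for the op whose make_node is shown above, following the example in the Theano documentation (the import path theano.tests.breakpoint is where PdbBreakpoint lives in the Theano source tree):

import theano.tensor as T
from theano.tests.breakpoint import PdbBreakpoint

input = T.fvector()
target = T.fvector()
mse = (input - target) ** 2

# Drop into pdb at run time whenever the total MSE exceeds 100.
breakpointOp = PdbBreakpoint("MSE too high")
condition = T.gt(mse.sum(), 100)
mse, monitored_input, monitored_target = breakpointOp(condition, mse, input, target)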
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def _is_dense_variable(x):
    """

    Returns
    -------
    boolean
        True if x is a L{tensor.TensorType} (and not a L{SparseVariable},
        for instance).

    """
    if not isinstance(x, gof.Variable):
        raise NotImplementedError("this function should only be called on "
                                  "*variables* (of type sparse.SparseType or "
                                  "tensor.TensorType, for instance), not ", x)
    return isinstance(x.type, tensor.TensorType)
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def as_sparse_variable(x, name=None):
    """
    Wrapper around SparseVariable constructor to construct
    a Variable with a sparse matrix with the same dtype and
    format.

    Parameters
    ----------
    x
        A sparse matrix.

    Returns
    -------
    object
        SparseVariable version of `x`.

    """

    # TODO
    # Verify that sp is sufficiently sparse, and raise a
    # warning if it is not

    if isinstance(x, gof.Apply):
        if len(x.outputs) != 1:
            raise ValueError("It is ambiguous which output of a "
                             "multi-output Op has to be fetched.", x)
        else:
            x = x.outputs[0]
    if isinstance(x, gof.Variable):
        if not isinstance(x.type, SparseType):
            raise TypeError("Variable type field must be a SparseType.", x,
                            x.type)
        return x
    try:
        return constant(x, name=name)
    except TypeError:
        raise TypeError("Cannot convert %s to SparseType" % x, type(x))
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, data, indices, indptr, shape):
        data = tensor.as_tensor_variable(data)

        if not isinstance(indices, gof.Variable):
            indices_ = numpy.asarray(indices)
            indices_32 = theano._asarray(indices, dtype='int32')
            assert (indices_ == indices_32).all()
            indices = indices_32
        if not isinstance(indptr, gof.Variable):
            indptr_ = numpy.asarray(indptr)
            indptr_32 = theano._asarray(indptr, dtype='int32')
            assert (indptr_ == indptr_32).all()
            indptr = indptr_32
        if not isinstance(shape, gof.Variable):
            shape_ = numpy.asarray(shape)
            shape_32 = theano._asarray(shape, dtype='int32')
            assert (shape_ == shape_32).all()
            shape = shape_32

        indices = tensor.as_tensor_variable(indices)
        indptr = tensor.as_tensor_variable(indptr)
        shape = tensor.as_tensor_variable(shape)

        if data.type.ndim != 1:
            raise TypeError('data argument must be a vector', data.type,
                            data.type.ndim)
        if indices.type.ndim != 1 or indices.type.dtype not in discrete_dtypes:
            raise TypeError('indices must be vector of integers', indices,
                            indices.type)
        if indptr.type.ndim != 1 or indptr.type.dtype not in discrete_dtypes:
            raise TypeError('indptr must be vector of integers', indptr,
                            indptr.type)
        if shape.type.ndim != 1 or shape.type.dtype not in discrete_dtypes:
            raise TypeError('shape must be vector of integers', shape,
                            shape.type)

        return gof.Apply(self,
                         [data, indices, indptr, shape],
                         [SparseType(dtype=data.type.dtype,
                                     format=self.format)()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        maxout = tensor.as_tensor_variable(maxout)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(maxout, Variable) and maxout.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert isinstance(pad, Variable) and pad.ndim == 1
        assert x.ndim == maxout.ndim == gz.ndim >= nd
        if not ws.dtype.startswith('int'):
            raise TypeError('Pool downsample parameters must be ints.')
        if not stride.dtype.startswith('int'):
            raise TypeError('Stride parameters must be ints.')
        if not pad.dtype.startswith('int'):
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x, gz, ws, stride=None, pad=None, dummy=None):
        # make_node should only be called by the grad function of
        # Pool, so these asserts should not fail.
        x = tensor.as_tensor_variable(x)
        gz = tensor.as_tensor_variable(gz)
        nd = self.ndim
        if stride is None:
            stride = ws
        if pad is None:
            pad = (0,) * nd
        ws = tensor.as_tensor_variable(ws)
        stride = tensor.as_tensor_variable(stride)
        pad = tensor.as_tensor_variable(pad)
        assert isinstance(x, Variable) and x.ndim >= nd
        assert isinstance(gz, Variable) and gz.ndim >= nd
        assert isinstance(ws, Variable) and ws.ndim == 1
        assert isinstance(stride, Variable) and stride.ndim == 1
        assert x.ndim == gz.ndim >= nd
        assert isinstance(pad, Variable) and pad.ndim == 1
        if not ws.dtype.startswith('int'):
            raise TypeError('Pool downsample parameters must be ints.')
        if not stride.dtype.startswith('int'):
            raise TypeError('Stride parameters must be ints.')
        if not pad.dtype.startswith('int'):
            raise TypeError('Padding parameters must be ints.')
        return Apply(self, [x, gz, ws, stride, pad], [x.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, _x):
        if not isinstance(_x, theano.Variable):
            x = as_tensor_variable(_x)
        else:
            x = _x

        if x.type.ndim != 2:
            raise TypeError('ExtractDiag only works on matrices', _x)
        return Apply(self, [x], [x.type.__class__(broadcastable=(False,),
                                                  dtype=x.type.dtype)()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test0(self):
        path = Variable(Generic())
        # Not specifying mmap_mode defaults to None, and the data is
        # copied into main memory
        x = tensor.load(path, 'int32', (False,))
        y = x * 2
        fn = function([path], y)
        assert (fn(self.filename) == (self.data * 2)).all()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test1(self):
        path = Variable(Generic())
        # 'c' means "copy-on-write", which allows the array to be overwritten
        # by an inplace Op in the graph, without modifying the underlying
        # file.
        x = tensor.load(path, 'int32', (False,), 'c')
        # x ** 2 has been chosen because it will work inplace.
        y = (x ** 2).sum()
        fn = function([path], y)
        # Call fn() twice, to check that inplace ops do not cause trouble
        assert (fn(self.filename) == (self.data ** 2).sum()).all()
        assert (fn(self.filename) == (self.data ** 2).sum()).all()
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def test_memmap(self):
        path = Variable(Generic())
        x = tensor.load(path, 'int32', (False,), mmap_mode='c')
        fn = function([path], x)
        assert type(fn(self.filename)) == numpy.core.memmap
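
The three tests above exercise theano.tensor.load; the sketch below is a standalone version of the same pattern (the file name 'data.npy' is hypothetical):

import numpy as np
import theano
import theano.tensor as T
from theano.gof.type import Generic

np.save('data.npy', np.arange(5, dtype='int32'))

path = theano.Variable(Generic())                      # symbolic file-path input
x = T.load(path, 'int32', (False,), mmap_mode=None)    # the array is read at call time
f = theano.function([path], x * 2)
print(f('data.npy'))                                   # [0 2 4 6 8]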
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self):
        return gof.Apply(self, [], [theano.Variable(Generic()),
                                    tensor(self.dtype,
                                           broadcastable=self.broadcastable)])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, data):
        return gof.Apply(self, [data],
                               [theano.Variable(Generic()), data.type()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, request, data):
        return gof.Apply(self, [request, data],
                               [theano.Variable(Generic())])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x):
        # Must work for all types that have a shape attribute.
        # This will fail at execution time.
        if not isinstance(x, theano.Variable):
            x = theano.tensor.as_tensor_variable(x)
        return gof.Apply(self, [x], [theano.tensor.lvector()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_node(self, x):
        # x could be one of a number of types
        # the only thing we require is that the variable have a .ndim,
        # and that the value have a .shape
        if not isinstance(x, theano.Variable):
            raise TypeError('x must be Variable with ndim attribute', x)
        if x.ndim <= self.i:
            raise TypeError('x has too few dimensions for Shape_i',
                            (x, self.i))
        return theano.Apply(self, [x], [theano.tensor.lscalar()])
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def forced_replace(out, x, y):
    """
    Check all internal values of the graph that compute the variable ``out``
    for occurrences of values identical with ``x``. If such occurrences are
    encountered then they are replaced with variable ``y``.

    Parameters
    ----------
    out : Theano Variable
    x : Theano Variable
    y : Theano Variable

    Examples
    --------
    out := sigmoid(wu)*(1-sigmoid(wu))
    x := sigmoid(wu)
    forced_replace(out, x, y) := y*(1-y)

    """
    if out is None:
        return None

    # ``visited`` is a set of nodes that are already known and don't need to be
    # checked again, speeding up the traversal of multiply-connected graphs.
    visited = set()
    def local_traverse(graph, x):
        if graph in visited:
            return []
        visited.add(graph)
        if equal_computations([graph], [x]):
            return [graph]
        elif not graph.owner:
            return []
        else:
            rval = []
            for inp in graph.owner.inputs:
                rval += local_traverse(inp, x)
            return rval
    to_replace = local_traverse(out, x)
    return clone(out, replace=OrderedDict((v, y) for v in to_replace))
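
A hedged sketch of the docstring's example; in the Theano source this helper lives in theano.scan_module.scan_utils, which is assumed here:

import theano.tensor as T
from theano.scan_module.scan_utils import forced_replace

w = T.vector('w')
u = T.vector('u')
y = T.vector('y')

s = T.nnet.sigmoid(w * u)
out = s * (1 - s)

# Every sub-graph equal to sigmoid(w * u) inside `out` is swapped for `y`,
# giving a graph equivalent to y * (1 - y).
new_out = forced_replace(out, s, y)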
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def ensure_float(val, default, name):
    if val is None:
        return default.clone()
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == 'float32':
        raise TypeError("%s: type is not float32" % (name,))
    return val
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def make_variable(self, name=None):
        """
        Return a `TensorVariable` of this type.

        Parameters
        ----------
        name : str
            A pretty name to identify this `Variable` when printing and
            debugging.

        """
        return self.Variable(self, name=name)
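
A short sketch of calling make_variable directly on a TensorType, which is essentially what convenience constructors such as theano.tensor.matrix() do internally:

import theano.tensor as T

ttype = T.TensorType(dtype='float64', broadcastable=(False, False))
x = ttype.make_variable(name='x')     # a TensorVariable of this type
print(type(x).__name__)               # TensorVariable
print(x.type)                         # TensorType(float64, matrix)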
Project: bigan    Author: jeffdonahue    | Project source | File source
def __init__(self, value, shape=None, index_max=None):
        """
        value: A Theano Tensor, shared variable, or constant value.
        shape: May be None (default) if non-symbolic shape is accessible by
             value.get_value().shape (as in a Theano shared variable --
             tried first) or by value.shape (as in a NumPy array).
             Otherwise (e.g., if value is a symbolic Theano tensor), shape
             should be specified as an iterable of ints, where some may be -1
             for don't cares (e.g., batch size).
        index_max: If value is integer-typed, index_max may be used
             to specify its maximum value.  e.g., a batch of N one-hot vectors,
             each representing a word in a 500 word vocabulary, could be
             specified with an integer-typed Tensor with values in
             [0, 1, ..., 499], and index_max=500.
        """
        if isinstance(value, Output):
            raise TypeError("value may not be an Output")
        self.value = value
        if shape is None:
            try:
                shape = value.get_value().shape
            except AttributeError:
                try:
                    shape = value.shape
                    if isinstance(shape, theano.Variable):
                        shape = None
                except AttributeError:
                    pass
        if shape is not None:
            for s in list(shape) + ([] if (index_max is None) else [index_max]):
                assert isinstance(s, int)
                assert s >= 0
            shape = tuple(shape)
            assert len(shape) == value.ndim
        self.shape = shape
        if index_max is not None:
            assert isinstance(value, int) or str(value.dtype).startswith('int'), \
                ('if index_max is given, value must be integer-typed; '
                 'was: %s' % value.dtype)
            assert index_max == int(index_max)
            index_max = int(index_max)
            if index_max < 0:
                raise ValueError('index_max must be non-negative')
        self.index_max = index_max
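
A hedged usage sketch for the wrapper above (Output is assumed to be the class this __init__ belongs to in the bigan project; it is not part of Theano itself):

import numpy as np
import theano
import theano.tensor as T

# A shared variable carries a concrete shape, so `shape` can be inferred.
W = theano.shared(np.zeros((128, 64), dtype='float32'))
out_w = Output(W)                                     # shape becomes (128, 64)

# A purely symbolic tensor needs an explicit shape.
labels = T.ivector('labels')
out_l = Output(labels, shape=(100,), index_max=500)   # 100 word indices in [0, 500)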
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def __ComparisonSwitch(SS, SD, DS):
    """

    Parameters
    ----------
    SS
        Function to apply between two sparses matrices.
    SD
        Function to apply between a sparse and a dense matrix.
    DS
        Function to apply between a dense and a sparse matrix.

    Returns
    -------
    function
        Switch function taking two matrices as input.

    Notes
    -----
    At least one of `x` and `y` must be a sparse matrix.

    DS swaps its inputs, since a dense matrix cannot be a left operand.

    """

    def helper(x, y):

        scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]]

        assert scipy_ver >= [0, 13]

        if hasattr(x, 'getnnz'):
            x = as_sparse_variable(x)
        if hasattr(y, 'getnnz'):
            y = as_sparse_variable(y)
        if not isinstance(x, theano.Variable):
            x = theano.tensor.as_tensor_variable(x)
        if not isinstance(y, theano.Variable):
            y = theano.tensor.as_tensor_variable(y)

        x_is_sparse_variable = _is_sparse_variable(x)
        y_is_sparse_variable = _is_sparse_variable(y)

        assert x_is_sparse_variable or y_is_sparse_variable
        if x_is_sparse_variable and y_is_sparse_variable:
            return SS(x, y)
        elif x_is_sparse_variable and not y_is_sparse_variable:
            return SD(x, y)
        elif y_is_sparse_variable and not x_is_sparse_variable:
            return DS(y, x)
        else:
            raise NotImplementedError()

    return helper
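
A hedged sketch of the dispatch pattern the factory above implements, assuming the helpers it calls (as_sparse_variable, _is_sparse_variable) are in scope as in the earlier snippets; the three lambdas are stand-ins for real comparison Ops and only report which branch was taken:

import theano.tensor as T
import theano.sparse as sparse

switch = __ComparisonSwitch(
    SS=lambda x, y: 'sparse vs sparse',
    SD=lambda x, y: 'sparse vs dense',
    DS=lambda y, x: 'dense vs sparse (operands swapped)',
)

s = sparse.csr_matrix('s')
d = T.dmatrix('d')
print(switch(s, s))   # sparse vs sparse
print(switch(s, d))   # sparse vs dense
print(switch(d, s))   # dense vs sparse (operands swapped)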
Project: Theano-Deep-learning    Author: GeekLiB    | Project source | File source
def filter_variable(self, other, allow_convert=True):
        """
        Convert a Variable into a CudaNdarrayType, if compatible.

        This Variable should either already be a CudaNdarrayType, or be
        a TensorType. It has to have the right number of dimensions,
        broadcastable pattern, and dtype.

        """
        if hasattr(other, '_as_CudaNdarrayVariable'):
            other = other._as_CudaNdarrayVariable()

        if not isinstance(other, Variable):
            # The value is not a Variable: we cast it into
            # a Constant of the appropriate Type.
            other = self.Constant(type=self, data=other)

        if other.type == self:
            return other

        if not isinstance(other.type, (tensor.TensorType, CudaNdarrayType)):
            raise TypeError('Incompatible type', (self, other.type))
        if (other.type.dtype != self.dtype):
            raise TypeError('Incompatible dtype', (self.dtype,
                                                   other.type.dtype))
        if other.type.ndim != self.ndim:
            raise TypeError('Incompatible number of dimensions.'
                            ' Expected %d, got %d.' % (self.ndim, other.ndim))
        if other.type.broadcastable != self.broadcastable:
            if allow_convert:
                type2 = other.type.clone(broadcastable=self.broadcastable)
                other2 = type2.convert_variable(other)
            else:
                other2 = None
            if other2 is None:
                raise TypeError('Incompatible broadcastable dimensions.'
                                ' Expected %s, got %s.' %
                                (str(other.type.broadcastable),
                                 str(self.broadcastable)))
            other = other2

        return theano.sandbox.cuda.basic_ops.GpuFromHost()(other)
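
The same conversion contract exists on the CPU TensorType, which makes it easy to try without a GPU; a hedged sketch:

import theano.tensor as T

ttype = T.TensorType('float64', (False, False))
x = T.dmatrix('x')
print(ttype.filter_variable(x) is x)      # already the right type: returned unchanged

c = ttype.filter_variable([[1.0, 2.0]])   # a raw value is wrapped in a TensorConstant
print(type(c).__name__)                   # TensorConstant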