Python theano module: config() code examples

We extracted the following 29 code examples from open-source Python projects to illustrate how theano.config is used.
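Note that `theano.config` is not actually called as a function: it is a module-level configuration object whose options are read and set as attributes. A minimal sketch (the printed defaults are assumptions; actual values depend on your ~/.theanorc and the THEANO_FLAGS environment variable):

import theano

print(theano.config.floatX)    # e.g. 'float64' unless overridden
print(theano.config.device)    # e.g. 'cpu'
theano.config.compute_test_value = 'off'   # many options can be changed at runtime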

Project: sampleRNN_ICLR2017    Author: soroushmehr    (an identical copy appears in mimicry.ai by fizerkhan)
def get_params(cost, criterion=lambda x: hasattr(x, 'param') and x.param==True):
    """
    Default criterion:
        lambda x: hasattr(x, 'param') and x.param==True
    This will return every parameter for `cost` from the computation graph.

    To exclude a parameter, just set 'param' to False:
        >>> h0 = lib.param('h0',\
                numpy.zeros((3, 2*512), dtype=theano.config.floatX))
        >>> print(h0.param)  # Default: True
        >>> h0.param = False

    In this case one can still get the list of all params (False or True) with:
        >>> lib.get_params(cost, lambda x: hasattr(x, 'param'))

    :returns:
        A list of params
    """
    return search(cost, criterion)
Project: Theano-Deep-learning    Author: GeekLiB
def parse_config_string(config_string, issue_warnings=True):
    """
    Parses a config string (comma-separated key=value components) into a dict.
    """
    config_dict = {}
    my_splitter = shlex.shlex(config_string, posix=True)
    my_splitter.whitespace = ','
    my_splitter.whitespace_split = True
    for kv_pair in my_splitter:
        kv_pair = kv_pair.strip()
        if not kv_pair:
            continue
        kv_tuple = kv_pair.split('=', 1)
        if len(kv_tuple) == 1:
            if issue_warnings:
                TheanoConfigWarning.warn(
                    ("Config key '%s' has no value, ignoring it"
                        % kv_tuple[0]),
                    stacklevel=1)
        else:
            k, v = kv_tuple
            # subsequent values for k will override earlier ones
            config_dict[k] = v
    return config_dict
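For illustration, a hedged usage sketch of the parser above (the call below is an assumption based only on this snippet; the helper is internal to Theano's configparser module):

parsed = parse_config_string("floatX=float32,device=cpu,mode", issue_warnings=True)
# -> {'floatX': 'float32', 'device': 'cpu'}
# The bare 'mode' entry has no '=', so it only triggers a
# TheanoConfigWarning and is dropped from the result.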
Project: Theano-Deep-learning    Author: GeekLiB
def grad(self, inp, grads):
        x, = inp
        gz, = grads
        gz = as_tensor_variable(gz)
        grad_order = ['x'] * len(x.type.broadcastable)
        for i, v in enumerate(self.new_order):
            if v != 'x':
                grad_order[v] = i
        # Do not make the DimShuffle inplace as an optimization at the
        # canonicalization optimization phase will remove the inplace.
        # The inplace will be reintroduced automatically later in the graph.
        if 'int' in inp[0].dtype:
            return [inp[0].zeros_like(dtype=theano.config.floatX)]
        else:
            return [DimShuffle(gz.type.broadcastable, grad_order)(
                Elemwise(scalar.identity)(gz))]
Project: Theano-Deep-learning    Author: GeekLiB
def summary(self, file=sys.stderr, n_ops_to_print=20,
                n_apply_to_print=20):
        self.summary_function(file)
        self.summary_globals(file)
        local_time = sum(self.apply_time.values())
        if local_time > 0:
            self.summary_class(file, n_ops_to_print)
            self.summary_ops(file, n_ops_to_print)
            self.summary_nodes(file, n_apply_to_print)
        elif self.fct_callcount > 0:
            print("  No execution time accumulated "
                  "(hint: try config profiling.time_thunks=1)", file=file)
        if config.profiling.debugprint:
            fcts = set([n.fgraph for n in self.apply_time.keys()])
            theano.printing.debugprint(fcts, print_type=True)
        if self.variable_shape or self.variable_strides:
            self.summary_memory(file, n_apply_to_print)
        if self.optimizer_profile:
            print("Optimizer Profile", file=file)
            print("-----------------", file=file)
            self.optimizer_profile[0].print_profile(file,
                                                    self.optimizer_profile[1])
        self.print_tips(file)
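The report printed by summary() is normally reached through Theano's profiler; a minimal sketch using the standard public API (not part of the snippet above):

import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], x ** 2, profile=True)
f([1.0, 2.0, 3.0])
f.profile.summary()   # prints the function/class/ops/nodes breakdown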
Project: Theano-Deep-learning    Author: GeekLiB
def get_c_declare(r, name, sub):
    """
    Wrapper around c_declare that declares py_name.

    """
    # The declaration will be used by the Apply node that
    # is computing it (`r.owner`), and by each of the clients.
    # If some of these have `check_input=True` in their `.op`,
    # it means they need `r`'s dtype to be declared, so
    # we have to pass `check_input=True` to `c_declare`.
    if ((any([getattr(c.op, 'check_input', config.check_input)
              for (c, _) in r.clients
              if not isinstance(c, string_types)]) or
         (r.owner and
          getattr(r.owner.op, 'check_input', config.check_input)))):
        c_declare = r.type.c_declare(name, sub, True)
    else:
        c_declare = r.type.c_declare(name, sub, False)
    pre = """
    PyObject* py_%(name)s;
    """ % locals()
    return pre + c_declare
Project: Theano-Deep-learning    Author: GeekLiB
def test_op_struct(self):
        if not theano.config.cxx:
            raise SkipTest("G++ not available, so we need to skip this test.")
        sop = StructOp()
        c = sop(theano.tensor.constant(0))
        mode = None
        if theano.config.mode == 'FAST_COMPILE':
            mode = 'FAST_RUN'
        f = theano.function([], c, mode=mode)
        rval = f()
        assert rval == 0
        rval = f()
        assert rval == 1

        c2 = sop(theano.tensor.constant(1))
        f2 = theano.function([], [c, c2], mode=mode)
        rval = f2()
        assert rval == [0, 0]
Project: Theano-Deep-learning    Author: GeekLiB
def test_debug_error_message():
    """tests that debug_error_message raises an
    exception when it should."""

    prev_value = config.compute_test_value

    for mode in ['ignore', 'raise']:

        try:
            config.compute_test_value = mode

            try:
                op.debug_error_message('msg')
                raised = False
            except ValueError:
                raised = True
            assert raised
        finally:
            config.compute_test_value = prev_value
Project: sampleRNN_ICLR2017    Author: soroushmehr    (an identical copy appears in mimicry.ai by fizerkhan)
def floatX(x):
    """
    Convert `x` to the numpy type specified in `theano.config.floatX`.
    """
    if theano.config.floatX == 'float16':
        return numpy.float16(x)
    elif theano.config.floatX == 'float32':
        return numpy.float32(x)
    else:  # Theano's default float type is float64
        print("Warning: lib.floatX using float64")
        return numpy.float64(x)
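A quick sketch of the helper in use (assuming config.floatX has been overridden to 'float32'):

import numpy
import theano

theano.config.floatX = 'float32'
v = floatX(0.5)
assert v.dtype == numpy.dtype('float32')   # a numpy.float32 scalar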
Project: deep-prior    Author: moberweger
def __init__(self, cfgParams, memory_factor):
        """
        Constructor
        :param cfgParams: initialized NetTrainerParams
        :param memory_factor: fraction of memory used for single shared variable
        """

        self.cfgParams = cfgParams

        if not isinstance(cfgParams, NetTrainerParams):
            raise ValueError("cfgParams must be an instance of NetTrainerParams")

        if 'gpu' in theano.config.device:
            # get GPU memory info
            mem_info = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
            self.memory = (mem_info[0] / 1024 ** 2) / float(memory_factor)  # in MB; a 1/memory_factor fraction of free GPU memory
        elif 'cpu' in theano.config.device:
            # get CPU memory info
            self.memory = (psutil.virtual_memory().available / 1024 ** 2) / float(memory_factor)  # in MB; a 1/memory_factor fraction of available RAM
        else:
            raise EnvironmentError("Neither GPU nor CPU device in theano.config found!")

        self.currentMacroBatch = -1  # current batch on GPU, load on first run
        self.trainSize = 0
        self.sampleSize = 0
        self.numTrainSamples = 0
        self.numValSamples = 0
        self.managedVar = []
Project: deep-prior-pp    Author: moberweger
def __init__(self, cfgParams, memory_factor, subfolder='./eval/', numChunks=1):
        """
        Constructor
        :param cfgParams: initialized NetTrainerParams
        :param memory_factor: fraction of memory used for single shared variable
        """

        self.subfolder = subfolder
        self.cfgParams = cfgParams
        self.rng = numpy.random.RandomState(23455)

        if not isinstance(cfgParams, NetTrainerParams):
            raise ValueError("cfgParams must be an instance of NetTrainerParams")

        # use fraction of free memory
        if 'gpu' in theano.config.device:
            # get GPU memory info
            mem_info = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
            self.memorySize = (mem_info[0] / 1024 ** 2) / float(memory_factor)  # MB
        elif 'cpu' in theano.config.device:
            # get CPU memory info
            self.memorySize = (psutil.virtual_memory().available / 1024 ** 2) / float(memory_factor)  # MB
        else:
            raise EnvironmentError("Neither GPU nor CPU device in theano.config found!")

        if cfgParams.para_load is True and numChunks == 1:
            raise ValueError("para_load is True but numChunks == 1, so we do not need para_load!")

        self.currentMacroBatch = -1  # current batch on GPU, load on first run
        self.currentChunk = -1  # current chunk in RAM, load on first run
        self.numChunks = numChunks
        self.trainSize = 0
        self.sampleSize = 0
        self.numTrainSamplesMB = 0
        self.numTrainSamples = 0
        self.numValSamples = 0
        self.epoch = 0
        self.managedVar = []
        self.trainingVar = []
        self.validation_observer = []
Project: Theano-Deep-learning    Author: GeekLiB
def fetch_val_for_key(key, delete_key=False):
    """Return the overriding config value for a key.
    A successful search returns a string value.
    An unsuccessful search raises a KeyError.

    The (decreasing) priority order is:
    - THEANO_FLAGS
    - ~/.theanorc

    """

    # first try to find it in the FLAGS
    try:
        if delete_key:
            return THEANO_FLAGS_DICT.pop(key)
        return THEANO_FLAGS_DICT[key]
    except KeyError:
        pass

    # next try to find it in the config file

    # config file keys can be of form option, or section.option
    key_tokens = key.rsplit('.', 1)
    if len(key_tokens) > 2:
        raise KeyError(key)

    if len(key_tokens) == 2:
        section, option = key_tokens
    else:
        section, option = 'global', key
    try:
        try:
            return theano_cfg.get(section, option)
        except ConfigParser.InterpolationError:
            return theano_raw_cfg.get(section, option)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        raise KeyError(key)
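A hedged sketch of the lookup order (paths and values are illustrative assumptions):

# With THEANO_FLAGS="floatX=float32" in the environment and a
# ~/.theanorc containing:
#     [global]
#     floatX = float64
# the environment variable wins:
#     fetch_val_for_key('floatX')   # -> 'float32'
# A dotted key is split into section and option, so
#     fetch_val_for_key('warn.ignore_bug_before')
# reads option 'ignore_bug_before' from section [warn] of the config file.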
Project: Theano-Deep-learning    Author: GeekLiB
def get_config_md5():
    """
    Return an MD5 string of the current config options. It should be such
    that two different config setups can safely be assumed to lead to two
    different strings.

    We only take into account config options for which `in_c_key` is True.
    """
    all_opts = sorted([c for c in _config_var_list if c.in_c_key],
                      key=lambda cv: cv.fullname)
    return theano.gof.utils.hash_from_code('\n'.join(
        ['%s = %s' % (cv.fullname, cv.__get__(True, None)) for cv in all_opts]))
Project: Theano-Deep-learning    Author: GeekLiB
def __set__(self, cls, val):
        if not self.allow_override and hasattr(self, 'val'):
            raise Exception(
                "Can't change the value of this config parameter "
                "after initialization!")
        # print "SETTING PARAM", self.fullname,(cls), val
        if self.filter:
            self.val = self.filter(val)
        else:
            self.val = val
Project: Theano-Deep-learning    Author: GeekLiB
def grad(self, inp, grads):
        x, = inp
        return [x.zeros_like(theano.config.floatX)]
Project: Theano-Deep-learning    Author: GeekLiB
def grad(self, inp, grads):
        x, = inp
        return [x.zeros_like(theano.config.floatX)]
Project: Theano-Deep-learning    Author: GeekLiB
def grad(self, inp, grads):
        x, = inp

        out = self(*inp)

        if out.dtype.find('int') != -1:
            return [x.zeros_like(dtype=theano.config.floatX)]

        gz, = grads
        gz = as_tensor_variable(gz)
        axis = self.axis
        if axis is None:
            axis = list(range(x.type.ndim))
        if axis == ():
            return gz,
        new_dims = []
        i = 0
        for j, _ in enumerate(x.type.broadcastable):
            if j in axis:
                new_dims.append('x')
            else:
                new_dims.append(i)
                i += 1
        ds_op = DimShuffle(gz.type.broadcastable, new_dims)
        gx = Elemwise(scalar.second)(x, ds_op(gz))
        return [gx]
Project: Theano-Deep-learning    Author: GeekLiB
def c_code_contiguous_disabled(self, node, name, inp, out, sub):
        x, = inp
        z, = out
        if (not theano.config.lib.amdlibm or
                node.inputs[0].dtype != node.outputs[0].dtype):
            raise theano.gof.utils.MethodNotDefined()
        dtype = node.inputs[0].dtype
        if dtype == 'float32' and self.amd_float32 is not None:
            dtype = 'float'
            fct = "amd_vrsa_expf"
        elif dtype == 'float64' and self.amd_float64 is not None:
            dtype = 'double'
            fct = "amd_vrda_exp"
        else:
            raise theano.gof.utils.MethodNotDefined()
        return """
        npy_intp n = PyArray_SIZE(%(z)s);
        %(dtype)s * x = (%(dtype)s*) PyArray_DATA(%(x)s);
        %(dtype)s * z = (%(dtype)s*) PyArray_DATA(%(z)s);
        // We process in blocks to keep the data in L1 cache.
        // Typical L1 size = 32k: 32k / 2 (input + output) / 8 (bytes per double) = 2k
        // We stay below the 2k limit to leave room for other data.
        // This is faster than the non-blocking version.
        for(int i=0;i<n;i+=2048){
            npy_intp nb = (n-i<2048)?n-i:2048;
            for(int j=0;j<nb;j++){
                z[i+j] = -x[i+j];
            }
            %(fct)s(nb, z+i, z+i);
            for(int j=0;j<nb;j++){
                z[i+j] = 1.0 /(1.0+z[i+j]);
            }
        }
        """ % locals()
Project: Theano-Deep-learning    Author: GeekLiB
def is_1pexp(t, only_process_constants=True):
    """

    Returns
    -------
    object
        If 't' is of the form (1+exp(x)), return (False, x).
        Else return None.

    """
    if t.owner and t.owner.op == tensor.add:
        scalars, scalar_inputs, nonconsts = \
            opt.scalarconsts_rest(t.owner.inputs,
                                  only_process_constants=only_process_constants)
        # scalar_inputs are potentially dimshuffled and filled with scalars
        if len(nonconsts) == 1:
            maybe_exp = nonconsts[0]
            if maybe_exp.owner and maybe_exp.owner.op == tensor.exp:
                # Verify that the constant terms sum to 1.
                if scalars:
                    scal_sum = scalars[0]
                    for s in scalars[1:]:
                        scal_sum = scal_sum + s
                    if numpy.allclose(scal_sum, 1):
                        return False, maybe_exp.owner.inputs[0]
                # Before 7987b51 there used to be a bug where *any* constant
                # was considered as if it was equal to 1, and thus this
                # function would incorrectly identify it as (1 + exp(x)).
                if config.warn.identify_1pexp_bug:
                    warnings.warn(
                        'Although your current code is fine, please note that '
                        'Theano versions prior to 0.5 (more specifically, '
                        'prior to commit 7987b51 on 2011-12-18) may have '
                        'yielded an incorrect result. To remove this warning, '
                        'either set the `warn.identify_1pexp_bug` config '
                        'option to False, or `warn.ignore_bug_before` to at '
                        'least \'0.4.1\'.')
    return None
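A hedged sketch of what the helper detects, using only the public tensor API (variable names are illustrative):

import theano.tensor as tensor

x = tensor.vector('x')
t = 1 + tensor.exp(x)
# is_1pexp(t) -> (False, x): the constant terms sum to 1, so t matches 1+exp(x)
# is_1pexp(x) -> None: x is not an addition involving exp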
Project: Theano-Deep-learning    Author: GeekLiB
def get_module_cache(init_args=None):
    """

    Parameters
    ----------
    init_args
        If not None, the (k, v) pairs in this dictionary will be forwarded to
        the ModuleCache constructor as keyword arguments.

    """
    return cmodule.get_module_cache(config.compiledir, init_args=init_args)
Project: Theano-Deep-learning    Author: GeekLiB
def get_persistent_module_cache():
    global _persistent_module_cache
    if _persistent_module_cache is None:
        _persistent_module_cache = CallCache(os.path.join(config.compiledir,
                                                          'persistent_cache'))
    return _persistent_module_cache
Project: Theano-Deep-learning    Author: GeekLiB
def get_c_extract(r, name, sub):
    """
    Wrapper around c_extract that initializes py_name from storage.

    """
    # `c_extract` is called when getting the value of an apply node's
    # input from the compute map, before being used by its clients.
    # If one of the clients has `check_input=True`, we need to perform
    # checks on the variable.
    # However that code is not used by C code of the apply node creating
    # this variable, so there is no need to check `r.owner.op.check_input`.
    if any([getattr(c.op, 'check_input', config.check_input)
            for (c, _) in r.clients
            if not isinstance(c, string_types)]):
        # check_broadcast is just a hack to easily remove only the
        # broadcast check on the old GPU back-end. This check isn't
        # done in the new GPU back-end or on the CPU.
        if any([getattr(c.op, 'check_broadcast', True)
                for (c, _) in r.clients
                if not isinstance(c, string_types)]):
            c_extract = r.type.c_extract(name, sub, True)
        else:
            try:
                c_extract = r.type.c_extract(
                    name, sub, True,
                    check_broadcast=False)
            except TypeError:
                c_extract = r.type.c_extract(name, sub, True)
    else:
        c_extract = r.type.c_extract(name, sub, False)

    pre = """
    py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
    {Py_XINCREF(py_%(name)s);}
    """ % locals()
    return pre + c_extract
Project: Theano-Deep-learning    Author: GeekLiB
def compile_cmodule(self, location=None):
        """
        This compiles the source code for this linker and returns a
        loaded module.

        """
        if location is None:
            location = cmodule.dlimport_workdir(config.compiledir)
        mod = self.get_dynamic_module()
        c_compiler = self.c_compiler()
        libs = self.libraries()
        preargs = self.compile_args()
        # We want to compute the code without the lock
        src_code = mod.code()
        get_lock()
        try:
            _logger.debug("LOCATION %s", str(location))
            module = c_compiler.compile_str(
                module_name=mod.code_hash,
                src_code=src_code,
                location=location,
                include_dirs=self.header_dirs(),
                lib_dirs=self.lib_dirs(),
                libs=libs,
                preargs=preargs)
        except Exception as e:
            e.args += (str(self.fgraph),)
            raise
        finally:
            release_lock()
        return module
Project: Theano-Deep-learning    Author: GeekLiB
def __init__(self,
                 fallback_on_perform=True,
                 allow_gc=None,
                 nice_errors=True,
                 schedule=None):
        if allow_gc is None:
            allow_gc = config.allow_gc
        self.fgraph = None
        self.fallback_on_perform = fallback_on_perform
        self.nice_errors = nice_errors
        self.allow_gc = allow_gc
        if schedule:
            self.schedule = schedule
Project: Theano-Deep-learning    Author: GeekLiB
def test_no_perform(self):
        class IncOneC(Op):
            """An Op with only a C (c_code) implementation"""
            __props__ = ()

            def make_node(self, input):
                input = scalar.as_scalar(input)
                output = input.type()
                return Apply(self, [input], [output])

            def c_code(self, node, name, inputs, outputs, sub):
                x, = inputs
                z, = outputs
                return "%(z)s = %(x)s + 1;" % locals()

        i = scalar.int32('i')
        o = IncOneC()(i)

        # Check that the perform function is not implemented
        self.assertRaises((NotImplementedError, utils.MethodNotDefined),
                          o.owner.op.perform,
                          o.owner, 0, [None])

        storage_map = {i: [numpy.int32(3)],
                       o: [None]}
        compute_map = {i: [True],
                       o: [False]}

        thunk = o.owner.op.make_thunk(o.owner, storage_map, compute_map,
                                      no_recycling=[])
        if theano.config.cxx:
            required = thunk()
            # Check everything went OK
            assert not required  # We provided all inputs
            assert compute_map[o][0]
            assert storage_map[o][0] == 4
        else:
            self.assertRaises((NotImplementedError, utils.MethodNotDefined),
                              thunk)
Project: Theano-Deep-learning    Author: GeekLiB
def test_test_value_op():
    try:
        prev_value = config.compute_test_value
        config.compute_test_value = 'raise'
        x = T.log(numpy.ones((5, 5)))
        v = op.get_test_value(x)

        assert numpy.allclose(v, numpy.zeros((5, 5)))
    finally:
        config.compute_test_value = prev_value
Project: Theano-Deep-learning    Author: GeekLiB
def test_get_det_debug_values_ignore():
    """get_debug_values should return [] when debugger is ignore
        and some values are missing """

    prev_value = config.compute_test_value
    try:
        config.compute_test_value = 'ignore'

        x = T.vector()

        for x_val in op.get_debug_values(x):
            assert False

    finally:
        config.compute_test_value = prev_value
Project: Theano-Deep-learning    Author: GeekLiB
def test_get_debug_values_success():
    """tests that get_debug_value returns values when available
    (and the debugger is on)"""

    prev_value = config.compute_test_value
    for mode in ['ignore', 'warn', 'raise']:

        try:
            config.compute_test_value = mode

            x = T.vector()
            x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)
            y = numpy.zeros((5, 5))

            iters = 0

            for x_val, y_val in op.get_debug_values(x, y):

                assert x_val.shape == (4,)
                assert y_val.shape == (5, 5)

                iters += 1

            assert iters == 1

        finally:
            config.compute_test_value = prev_value
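Outside the test harness, test values propagate through the graph as it is built; a minimal sketch with the standard API:

import numpy
import theano
import theano.tensor as T

theano.config.compute_test_value = 'raise'
x = T.vector()
x.tag.test_value = numpy.zeros((4,), dtype=theano.config.floatX)
y = x * 2
print(y.tag.test_value.shape)   # (4,), computed eagerly from x's test value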
Project: Theano-Deep-learning    Author: GeekLiB
def test_get_debug_values_exc():
    """tests that get_debug_value raises an exception when
        debugger is set to raise and a value is missing """

    prev_value = config.compute_test_value
    try:
        config.compute_test_value = 'raise'

        x = T.vector()

        try:
            for x_val in op.get_debug_values(x):
                # this assert catches the case where we
                # erroneously get a value returned
                assert False
            raised = False
        except AttributeError:
            raised = True

        # this assert catches the case where we got []
        # returned, and possibly issued a warning,
        # rather than raising an exception
        assert raised

    finally:
        config.compute_test_value = prev_value
Project: sampleRNN_ICLR2017    Author: soroushmehr    (an identical copy appears in mimicry.ai by fizerkhan)
def print_model_settings(locals_var, path=None, sys_arg=False):
    """
    Prints all upper-case variables in locals_var,
    except for T, which usually stands for theano.tensor.
    If locals() is passed as input, this prints all upper-case
    variables defined so far, i.e. the model settings.

    If `path` points to a directory, the log is also _appended_
    to a file named `model_settings.txt` there.

    With `sys_arg` set to True, information about Python, Numpy,
    and Theano, plus the arguments passed to the script, is logged too.
    args.pkl will be overwritten, especially when resuming a job.
    That is not much of a problem, though, as all arguments passed
    to the script except '--resume' should be the same.

    With both `path` and `sys_arg` passed, theano.config is dumped as well.

    :usage:
        >>> import theano.tensor as T
        >>> import lib
        >>> BATCH_SIZE, DIM = 128, 512
        >>> DATA_PATH = '/Path/to/dataset'
        >>> lib.print_model_settings(locals(), path='./')
    """
    log = ""
    if sys_arg:
        try:
            log += "Python:\n"
            log += "\tsys.version_info\t{}\n".format(str(sys.version_info))
            log += "Numpy:\n"
            log += "\t.__version__\t{}\n".format(numpy.__version__)
            log += "Theano:\n"
            log += "\t.__version__\t{}\n".format(theano.__version__)
            log += "\n\nAll passed args:\n"
            log += str(sys.argv)
            log += "\n"
        except Exception:
            print("Something went wrong during sys_arg logging. Continue anyway!")

    log += "\nModel settings:"
    all_vars = [(k,v) for (k,v) in locals_var.items() if (k.isupper() and k != 'T')]
    all_vars = sorted(all_vars, key=lambda x: x[0])
    for var_name, var_value in all_vars:
        log += ("\n\t%-20s %s" % (var_name, var_value))
    print(log)
    if path is not None:
        ensure_dir(path)
        # Don't overwrite; just append in case something is already in the file.
        with open(os.path.join(path, __model_setting_file_name), 'a+') as f:
            f.write(log)
        if sys_arg:
            with open(os.path.join(path, 'th_conf.txt'), 'a+') as f:
                f.write(str(theano.config))
            with open(os.path.join(path, 'args.pkl'), 'wb') as f:
                pickle.dump(sys.argv, f)
                # To load:
                # >>> import cPickle as pickle
                # >>> args = pickle.load(open(os.path.join(path, 'args.pkl'), 'rb'))