Python keras.optimizers module: get() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use keras.optimizers.get().
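
Before the project snippets, here is a minimal sketch of what keras.optimizers.get() does (Keras 2 behavior; details vary slightly across versions): it resolves a string name, a serialized config dict, or an existing Optimizer instance into an Optimizer object.

from keras import optimizers

# A string name resolves to an Optimizer instance with default
# hyperparameters.
opt = optimizers.get('adam')

# An existing Optimizer instance is returned unchanged.
sgd = optimizers.SGD(lr=0.01, momentum=0.9)
assert optimizers.get(sgd) is sgd

# In Keras 2, a config dict (as produced by optimizers.serialize)
# also works.
opt = optimizers.get({'class_name': 'SGD', 'config': {'lr': 0.01}})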

Project: vinci | Author: Phylliade
from keras import optimizers


def clone_optimizer(optimizer):
    # A plain string identifier is resolved directly.
    if type(optimizer) is str:
        return optimizers.get(optimizer)
    # Requires Keras 1.0.7 since get_config has breaking changes.
    params = dict(optimizer.get_config())  # shallow copy of the config
    config = {
        'class_name': optimizer.__class__.__name__,
        'config': params,
    }
    if hasattr(optimizers, 'optimizer_from_config'):
        # COMPATIBILITY: Keras < 2.0
        clone = optimizers.optimizer_from_config(config)
    else:
        # Keras >= 2.0
        clone = optimizers.deserialize(config)
    return clone
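
A minimal usage sketch for the helper above (hypothetical values; assumes a Keras install where either deserialization path exists):

from keras import optimizers

original = optimizers.SGD(lr=0.01, momentum=0.9)
clone = clone_optimizer(original)

# The clone is a distinct object with the same hyperparameters, so it
# can be mutated without touching the original.
assert clone is not original
assert clone.get_config() == original.get_config()

# A string identifier takes the early-return path through optimizers.get().
adam = clone_optimizer('adam')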
Project: keras_experiments | Author: avolkov1
def compile(self, *args, **kwargs):
    '''Refer to the Model.compile docstring for parameters. Overridden
    functionality is documented below.

    :override compile: Overrides Model.compile to check that the
        optimizer is multi-GPU enabled and to synchronize initial
        variables.
    '''
    initsync = self._initsync
    usenccl = self._usenccl

    # Coerce a string (or config) identifier into an Optimizer instance.
    opt = kwargs['optimizer']
    if not isinstance(opt, KO.Optimizer):
        opt = KO.get(opt)
        kwargs['optimizer'] = opt

    if self._syncopt and not getattr(opt, 'ismgpu', False):
        raise RuntimeError(
            'Multi-GPU synchronization model requires a multi-GPU '
            'optimizer. Instead got: {}'.format(opt))

    opt.usenccl = usenccl

    if self._enqueue_ops:
        # Produces a warning that kwargs are ignored for Tensorflow. Patch
        # Function in tensorflow_backend to use the enqueue_ops option.
        kwargs['fetches'] = self._enqueue_ops

    super(ModelMGPU, self).compile(*args, **kwargs)

    if initsync:
        self._run_initsync()
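
The coercion at the top of compile is the part that exercises optimizers.get(); here is a standalone sketch of just that step (KO is presumably the project's alias for keras.optimizers):

import keras.optimizers as KO


def coerce_optimizer(opt):
    # Accept a name string, config dict, or Optimizer instance and
    # always hand back an Optimizer instance.
    if not isinstance(opt, KO.Optimizer):
        opt = KO.get(opt)
    return opt


opt = coerce_optimizer('rmsprop')  # -> a KO.RMSprop instance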
Project: keras-rl | Author: matthiasplappert
from keras import optimizers


def clone_optimizer(optimizer):
    # A plain string identifier is resolved directly.
    if type(optimizer) is str:
        return optimizers.get(optimizer)
    # Requires Keras 1.0.7 since get_config has breaking changes.
    params = dict(optimizer.get_config())  # shallow copy of the config
    config = {
        'class_name': optimizer.__class__.__name__,
        'config': params,
    }
    if hasattr(optimizers, 'optimizer_from_config'):
        # COMPATIBILITY: Keras < 2.0
        clone = optimizers.optimizer_from_config(config)
    else:
        # Keras >= 2.0
        clone = optimizers.deserialize(config)
    return clone
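
This snippet is identical to the vinci one above (vinci appears to be a fork of keras-rl), so the same clone_optimizer helper shows up in both projects; the usage sketch after the vinci version applies here unchanged.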
Project: speech_ml | Author: coopie
import warnings

from keras.models import model_from_yaml

# yaml_to_dict and get_optimizer are helpers defined elsewhere in the
# speech_ml project.


def load_model_old(path_to_model_dir):
    warnings.warn('`load_model` called. This is a deprecated function!')
    model = model_from_yaml(open(path_to_model_dir + '/config.yaml').read())
    model.load_weights(path_to_model_dir + '/weights.hdf5')
    compile_args = yaml_to_dict(path_to_model_dir + '/compile_args.yaml')

    optimizer = compile_args.pop('optimizer')
    if isinstance(optimizer, dict):
        # A serialized optimizer: the dict carries its name plus kwargs.
        name = optimizer.pop('name')
        optimizer = get_optimizer(name, optimizer)
    else:
        # A plain string identifier such as 'adam'.
        optimizer = get_optimizer(optimizer)

    model.compile(optimizer=optimizer, **compile_args)
    return model
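
The dict branch above relies on speech_ml's own serialization format. For comparison, here is a sketch of round-tripping an optimizer through the stock Keras 2 API instead (API names per Keras 2.x):

from keras import optimizers

original = optimizers.Adam(lr=1e-3)
config = optimizers.serialize(original)
# config == {'class_name': 'Adam', 'config': {...hyperparameters...}}

restored = optimizers.deserialize(config)
assert type(restored) is type(original)

# optimizers.get() accepts the same dict form directly.
assert type(optimizers.get(config)) is type(original)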
Project: X | Author: EderSantana
def compile(self, state_dim_values, lr=0.2, policy_rule="maxrand", init_value=None):
    """Build and initialize the table with all possible state values.

    state_dim_values is a tuple of arrays or lists; each array gives
    every possible value for the corresponding dimension.
    """
    # policies.get resolves a policy by name, analogous to
    # keras.optimizers.get.
    self.policy_rule = policies.get(policy_rule)

    if init_value is None:
        self.init_value = np.zeros(self.num_actions)
    else:
        self.init_value = init_value

    # One table entry per point in the Cartesian product of the state
    # dimensions, each initialized to a copy of init_value.
    self.table = {key: np.array(self.init_value)
                  for key in itertools.product(*state_dim_values)}
    self.lr = lr
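
A standalone sketch of the table construction above, with a hypothetical 2-dimensional state space:

import itertools

import numpy as np

# Dimension 0 takes values {0, 1}; dimension 1 takes values {0, 1, 2}.
state_dim_values = ([0, 1], [0, 1, 2])
num_actions = 4

table = {key: np.zeros(num_actions)
         for key in itertools.product(*state_dim_values)}

sorted(table)  # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]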
Project: X | Author: EderSantana
def values(self, observation):
    if observation.ndim == 1:
        # Single observation: one direct table lookup.
        vals = self.table[tuple(observation)]
    else:
        obs_tuple = tuple(map(tuple, observation))  # rows -> hashable tuples
        # list() matters under Python 3, where map is a lazy iterator.
        vals = list(map(self.table.__getitem__, obs_tuple))
    vals = np.asarray(vals)  # list of arrays -> 2-d array
    return vals
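
Continuing the sketch above, the two lookup paths (a single observation versus a batch):

obs = np.array([1, 2])
table[tuple(obs)]              # Q-values for one state, shape (4,)

batch = np.array([[0, 0], [1, 2]])
rows = [table[tuple(row)] for row in batch]
np.asarray(rows).shape         # (2, 4): one row of Q-values per state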
Project: keras_experiments | Author: avolkov1
import tensorflow as tf

# have_nccl and nccl come from this module's guarded NCCL import; when
# NCCL is unavailable, the fallback assign loop below is used.


def all_sync_params(tower_params, devices, usenccl=True):
    """Assigns the params from the first tower to all others."""
    if len(devices) == 1:
        return tf.no_op()
    sync_ops = []
    if have_nccl and usenccl:
        for param_on_devices in zip(*tower_params):
            # print('PARAM_ON_DEVICES: {}'.format(param_on_devices))  # DEBUG
            # Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...]
            param0 = param_on_devices[0]
            send_op, received_tensors = nccl.broadcast(param0, devices[1:])
            sync_ops.append(send_op)
            for device, param, received in zip(devices[1:],
                                               param_on_devices[1:],
                                               received_tensors):
                with tf.device(device):
                    sync_op = param.assign(received)
                    sync_ops.append(sync_op)
    else:
        params0 = tower_params[0]
        for device, params in zip(devices, tower_params):
            with tf.device(device):
                for param, param0 in zip(params, params0):
                    sync_op = param.assign(param0.read_value())
                    sync_ops.append(sync_op)

    return tf.group(*sync_ops)


# def stage(tensors):
#     """Stages the given tensors in a StagingArea for asynchronous put/get.
#     """
#     stage_area = data_flow_ops.StagingArea(
#         dtypes=[tensor.dtype for tensor in tensors],
#         shapes=[tensor.get_shape() for tensor in tensors])
#     put_op = stage_area.put(tensors)
#     get_tensors = stage_area.get()
#     if not isinstance(get_tensors, list):
#         get_tensors = [get_tensors]
#     # print('GET_TENSORS: {}'.format(get_tensors))  # DEBUG
#
#     get_tensors = [tf.reshape(gt, t.get_shape())
#                    for (gt, t) in zip(get_tensors, tensors)]
#     return put_op, get_tensors
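
A minimal TF1-style usage sketch for all_sync_params (names hypothetical; forces the non-NCCL fallback and assumes the module's guarded import has defined have_nccl):

import tensorflow as tf

devices = ['/gpu:0', '/gpu:1']
tower_params = []
for i, dev in enumerate(devices):
    with tf.device(dev):
        w = tf.Variable(tf.random_normal([3, 3]), name='w{}'.format(i))
        tower_params.append([w])

sync = all_sync_params(tower_params, devices, usenccl=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(sync)  # tower 1's w now equals tower 0's w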