Python cPickle module: dump() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cPickle.dump().
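Before the project examples, here is a minimal sketch of the basic dump/load round trip. The file name and values are illustrative; the try/except import is the usual Python 2 idiom, with the pure-Python module as a fallback:

try:
    import cPickle as pickle  # fast C implementation (Python 2)
except ImportError:
    import pickle  # pure-Python fallback (also works on Python 3)

data = {'objectid': 'HP1234', 'mags': [12.1, 12.3, 12.2]}
with open('example.pkl', 'wb') as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
with open('example.pkl', 'rb') as f:
    assert pickle.load(f) == data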

Project: astrobase    Author: waqasbhatti    | Project source | File source
def lcdict_to_pickle(lcdict, outfile=None):
    '''This just writes the lcdict to a pickle.

    If outfile is None, the object ID is taken from lcdict['objectid'] and the
    output is written to <objectid>-hplc.pkl. If no object ID is available,
    the output is written to a file named hplc.pkl.

    '''

    if not outfile and lcdict['objectid']:
        outfile = '%s-hplc.pkl' % lcdict['objectid']
    elif not outfile and not lcdict['objectid']:
        outfile = 'hplc.pkl'

    with open(outfile,'wb') as outfd:
        pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)

    if os.path.exists(outfile):
        LOGINFO('lcdict for object: %s -> %s OK' % (lcdict['objectid'],
                                                    outfile))
        return outfile
    else:
        LOGERROR('could not make a pickle for this lcdict!')
        return None
Project: deep-summarization    Author: harpribot    | Project source | File source
def __init__(self, model_nm, cell_nm, attention_type):
        """

        :param model_nm:
        :param cell_nm:
        :param attention_type:
        """
        self.model_nm = model_nm
        self.cell_nm = cell_nm
        self.attention_type = attention_type
        self.last_ckpt = None
        self.last_id = 0
        self.step_save_location = 'steps.p'
        self.data_save_location = 'data'
        self.mapper_save_location = 'mapper.p'
        self.steps_per_ckpt = None
        self.num_steps_per_prediction = None
        self.present_checkpoints = None
        self.outfile = None
        # initialize the steps if not initialized
        if self.step_save_location not in os.listdir(self.get_checkpoint_location()):
            with open(self.get_step_file(), 'wb') as step_file:
                pickle.dump(0, step_file)
Project: alfred-mpd    Author: deanishe    | Project source | File source
def save(self):
        """Save settings to JSON file specified in ``self._filepath``.

        If you're using this class via :attr:`Workflow.settings`, which
        you probably are, ``self._filepath`` will be ``settings.json``
        in your workflow's data directory (see :attr:`~Workflow.datadir`).
        """
        if self._nosave:
            return
        data = {}
        data.update(self)
        # for key, value in self.items():
        #     data[key] = value
        with LockFile(self._filepath):
            with atomic_writer(self._filepath, 'wb') as file_obj:
                json.dump(data, file_obj, sort_keys=True, indent=2,
                          encoding='utf-8')

    # dict methods
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def save_training_info(values, path):
    """
    Takes a dictionary of values and appends them to a log file
    stored at <path>/train_log.pkl.
    """
    file_name = os.path.join(path, __train_log_file_name)
    try:
        with open(file_name, "rb") as f:
            log = pickle.load(f)
    except IOError:  # first time
        log = {}
        for k in values.keys():
            log[k] = []
    for k, v in values.items():
        log[k].append(v)
    with open(file_name, "wb") as f:
        pickle.dump(log, f)
Project: Gank-Alfred-Workflow    Author: hujiaweibujidao    | Project source | File source
def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """

        # Basic validation
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer
Project: Gank-Alfred-Workflow    Author: hujiaweibujidao    | Project source | File source
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """

        return pickle.dump(obj, file_obj, protocol=-1)
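A note on protocol=-1 in the call above: a negative protocol number selects the highest protocol available, so it is equivalent to passing pickle.HIGHEST_PROTOCOL explicitly:

import cPickle as pickle

with open('obj.pkl', 'wb') as f:
    pickle.dump({'a': 1}, f, protocol=-1)  # same bytes as the call below
with open('obj2.pkl', 'wb') as f:
    pickle.dump({'a': 1}, f, pickle.HIGHEST_PROTOCOL)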


# Set up default manager and register built-in serializers
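To illustrate the duck-typed interface that register() validates (any object exposing load() and dump() will do), here is a hypothetical serializer; the class name and the commented registration call are illustrative, not project code:

import json

class IndentedJSONSerializer(object):
    """Illustrative serializer: only load() and dump() are required."""

    @classmethod
    def load(cls, file_obj):
        return json.load(file_obj)

    @classmethod
    def dump(cls, obj, file_obj):
        return json.dump(obj, file_obj, indent=2)

# manager.register('json', IndentedJSONSerializer)  # 'json' doubles as the file extension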
Project: Gank-Alfred-Workflow    Author: hujiaweibujidao    | Project source | File source
def save(self):
        """Save settings to JSON file specified in ``self._filepath``

        If you're using this class via :attr:`Workflow.settings`, which
        you probably are, ``self._filepath`` will be ``settings.json``
        in your workflow's data directory (see :attr:`~Workflow.datadir`).
        """
        if self._nosave:
            return
        data = {}
        data.update(self)
        # for key, value in self.items():
        #     data[key] = value
        with LockFile(self._filepath):
            with atomic_writer(self._filepath, 'wb') as file_obj:
                json.dump(data, file_obj, sort_keys=True, indent=2,
                          encoding='utf-8')

    # dict methods
Project: Flask_Blog    Author: sugarguo    | Project source | File source
def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = int(time() + self.default_timeout)
        elif timeout != 0:
            timeout = int(time() + timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True
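Because set() writes two consecutive pickles into one file (the timeout first, then the value), the read path has to load them back in the same order. A minimal sketch, assuming the same module-level pickle and time imports; the real werkzeug get() differs in its details:

def get_sketch(self, key):
    filename = self._get_filename(key)
    try:
        with open(filename, 'rb') as f:
            pickle_time = pickle.load(f)  # first pickle: expiry timestamp
            if pickle_time == 0 or pickle_time >= time():
                return pickle.load(f)  # second pickle: the cached value
    except (IOError, OSError, EOFError, pickle.PickleError):
        pass
    return None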
Project: comprehend    Author: Fenugreek    | Project source | File source
def save_params(self, tofile):
        """
        Save params to disk, compatible with the fromfile option of the constructor.
        If the argument is a string, a new file with that name is written to.
        If the argument is a file handle, data is written to that handle.
        """

        is_handle = isinstance(tofile, file)
        save_file = tofile if is_handle else open(tofile, 'wb')

        for variable in self.params.values():
            cPickle.dump(variable.eval(), save_file, -1)

        for attr in type(self).attr_names:
            cPickle.dump(getattr(self, attr), save_file, -1)

        if not is_handle: save_file.close()
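save_params() issues several cPickle.dump() calls into a single file, so a reader must call cPickle.load() the same number of times; reading until EOFError recovers every object in order. A hedged sketch (the helper name is illustrative):

import cPickle

def load_all_pickles(path):
    # Read back a file produced by repeated cPickle.dump() calls;
    # cPickle.load() raises EOFError once the stream is exhausted.
    objects = []
    with open(path, 'rb') as f:
        while True:
            try:
                objects.append(cPickle.load(f))
            except EOFError:
                break
    return objects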
Project: code    Author: ActiveState    | Project source | File source
def ith_prime(i):
    """
    gets prime(i)
    input: number
    return: number
    """
    global primes, interval
    while i >= len(primes):
        a = ((primes[-1] + 2) // 6) * 6 - 1
        b = a + interval
        c = a + 2
        d = b + 2
        try:
            primes.extend(filter(isprime, xrange(a, b, 6)))
            primes.extend(filter(isprime, xrange(c, d, 6)))
            primes = sorted(list(set(primes)))
            mpp = open(fn, 'wb')  # binary mode: protocol -1 writes a binary pickle
            cPickle.dump(primes, mpp, protocol=-1)
            mpp.close()
            print 'Prime[%s] = %s' % (fmt_n(len(primes)), fmt_n(primes[-1]))
        except ValueError:
            interval = interval // 2
    return primes[i]
Project: code    Author: ActiveState    | Project source | File source
def make_gc_snapShot(filename, name):
        """Append the signatures to a file, giving them the given
        'name'. A signature is a pair object_id / type_name"""
    global first_time
    if first_time:
        gc.collect()
        first_time = False
    contents = []
    for o in gc.get_objects():
        try:
            tname = o.__class__.__name__
        except AttributeError:
            tname = str(type(o))
        contents.append((id(o), tname))
        del tname
    f = open(filename, 'a')
    pickle.dump((name, contents), f)
    f.close()
    del contents
    del f
Project: dpl    Author: ppengtang    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        # gt_roidb = [self._load_pascal_annotation(index)
        gt_roidb = [self._load_pascal_labels(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
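This load-or-compute-and-cache pattern recurs in nearly every roidb method on this page. A generic sketch of the same idea (the helper name is hypothetical, not from the project):

import os
import cPickle

def cached_pickle(cache_file, compute_fn):
    # Return the cached object if present; otherwise compute it,
    # cache it with the highest protocol, and return it.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            return cPickle.load(fid)
    result = compute_fn()
    with open(cache_file, 'wb') as fid:
        cPickle.dump(result, fid, cPickle.HIGHEST_PROTOCOL)
    return result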
Project: dpl    Author: ppengtang    | Project source | File source
def selective_search_IJCV_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
                format(self.name, self.config['top_k']))

        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} ss roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
        roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote ss roidb to {}'.format(cache_file)

        return roidb
Project: swjtu-pyscraper    Author: Desgard    | Project source | File source
def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = int(time() + self.default_timeout)
        elif timeout != 0:
            timeout = int(time() + timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True
Project: deep-prior    Author: moberweger    | Project source | File source
def save(self, filename):
        """
        Save the state of this network to a pickle file on disk.
        :param filename: Save the parameters of this network to a pickle file at the named path. If this name ends in
               ".gz" then the output will automatically be gzipped; otherwise the output will be a "raw" pickle.
        :return: None
        """

        state = dict([('class', self.__class__.__name__), ('network', self.__str__())])
        for layer in self.layers:
            key = '{}-values'.format(layer.layerNum)
            state[key] = [p.get_value() for p in layer.params]
        opener = gzip.open if filename.lower().endswith('.gz') else open
        handle = opener(filename, 'wb')
        cPickle.dump(state, handle, -1)
        handle.close()
        print 'Saved model parameters to {}'.format(filename)
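A matching loader would mirror the gzip-versus-raw choice by file extension. A minimal sketch, assuming the same state-dict layout; this is not part of the project:

import gzip
import cPickle

def load_state(filename):
    # Mirror save(): gzip-compressed if the name ends in ".gz",
    # a raw pickle otherwise.
    opener = gzip.open if filename.lower().endswith('.gz') else open
    handle = opener(filename, 'rb')
    try:
        return cPickle.load(handle)
    finally:
        handle.close()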
Project: sail    Author: GemHunt    | Project source | File source
def create_seed_and_test_random(factor, start_id):
    # Only use 1/factor of the crop images:
    # for example, with 10000 crops and a factor of 100,
    # only 100 of them would be the random seed and test images.
    # A factor of 0 would use 100%.
    # This should be changed to a percentage!
    crops = []
    image_ids = []
    for filename in glob.iglob(crop_dir + '*.png'):
        crops.append(filename)

    for filename in crops:
        renamed = filename.replace("_", "")
        image_id = int(renamed.replace('.png', '').replace('/home/pkrush/cents/', ''))
        if image_id < start_id:
            continue
        renamed = crop_dir + str(image_id) + '.png'
        os.rename(filename, renamed)
        rand_int = random.randint(0, factor)
        if rand_int == 0:
            image_ids.append(image_id)
    pickle.dump(image_ids, open(data_dir + 'seed_image_ids.pickle', "wb"))
    pickle.dump(image_ids, open(data_dir + 'test_image_ids.pickle', "wb"))
Project: sail    Author: GemHunt    | Project source | File source
def create_new_indexes(total_new_seed_imgs, total_new_test_imgs):
    seeds = pickle.load(open(data_dir + 'seed_data.pickle', "rb"))
    seed_image_ids = []
    test_image_ids = []
    count = 0

    for seed_image_id, values in seeds.iteritems():
        values.sort(key=lambda x: x[0], reverse=False)
        # seed_image_ids.append(values[0:total_new_seed_imgs][2])
        # test_image_ids.append(values[total_new_seed_imgs:total_new_seed_imgs+total_new_test_imgs][2])

        for max_value, angle, image_id in values:
            count += 1
            if count < total_new_seed_imgs:
                seed_image_ids.append(image_id)
            else:
                if count < total_new_seed_imgs + total_new_test_imgs:
                    test_image_ids.append(image_id)
        count = 0
    pickle.dump(seed_image_ids, open(data_dir + 'seed_image_ids.pickle', "wb"))
    pickle.dump(test_image_ids, open(data_dir + 'test_image_ids.pickle', "wb"))
Project: sail    Author: GemHunt    | Project source | File source
def get_ground_truth_dates(total_coin_results):
    #ground_truth_dates = pickle.load(open(data_dir + 'get_ground_truth_dates.pickle', "rb"))
    ground_truth_date_dict = {}
    for seed_id, values in total_coin_results.iteritems():
        for coin_id, result in values.iteritems():
            if coin_id not in ground_truth_date_dict:
                ground_truth_date_dict[coin_id] = [seed_id, 0]
            if result > ground_truth_date_dict[coin_id][1]:
                ground_truth_date_dict[coin_id] = [seed_id, result]

    # it bugs me I am not using a more pythonic way here:
    ground_truth_date_array = []
    for coin_id, values in ground_truth_date_dict.iteritems():
        seed_id = values[0]
        result = values[1]
        ground_truth_date_array.append([seed_id, coin_id, result, 0, False, False])

    ground_truth_date_array = sorted(ground_truth_date_array, key=lambda x: x[2], reverse=True)
    ground_truth_date_array = sorted(ground_truth_date_array, key=lambda x: x[0])

    pickle.dump(ground_truth_date_array, open(data_dir + 'ground_truth_dates.pickle', "wb"))
    return ground_truth_date_array
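For the step the comment above apologizes for, a behaviour-equivalent one-pass alternative builds the rows with a generator and folds the two stable sorts into one composite key (the results are numeric, so negating them sorts that field descending):

ground_truth_date_array = sorted(
    ([seed_id, coin_id, result, 0, False, False]
     for coin_id, (seed_id, result) in ground_truth_date_dict.iteritems()),
    key=lambda row: (row[0], -row[2]))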
Project: mx-rfcn    Author: giorking    | Project source | File source
def gt_roidb(self):
        """
        return ground truth image regions database
        :return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self.load_pascal_annotation(index) for index in self.image_set_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: mx-rfcn    Author: giorking    | Project source | File source
def selective_search_roidb(self, gt_roidb):
        """
        get selective search roidb and ground truth roidb
        :param gt_roidb: ground truth roidb
        :return: roidb of selective search (ground truth included)
        """
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} ss roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        if self.image_set != 'test':
            ss_roidb = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self.load_selective_search_roidb(None)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote ss roidb to {}'.format(cache_file)

        return roidb
Project: mx-rfcn    Author: giorking    | Project source | File source
def gt_roidb(self):
        """
        return ground truth image regions database
        :return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self.load_annotation(index) for index in self.image_set_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: adversarial-frcnn    Author: xiaolonw    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: adversarial-frcnn    Author: xiaolonw    | Project source | File source
def _write_coco_results_file(self, all_boxes, res_file):
        # [{"image_id": 42,
        #   "category_id": 18,
        #   "bbox": [258.15,41.29,348.26,243.78],
        #   "score": 0.236}, ...]
        results = []
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
                                                          self.num_classes - 1)
            coco_cat_id = self._class_to_coco_cat_id[cls]
            results.extend(self._coco_results_one_category(all_boxes[cls_ind],
                                                           coco_cat_id))
        print 'Writing results json to {}'.format(res_file)
        with open(res_file, 'w') as fid:
            json.dump(results, fid)
Project: workflows.kyoyue    Author: wizyoung    | Project source | File source
def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """
        # Basic validation
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer
Project: workflows.kyoyue    Author: wizyoung    | Project source | File source
def save(self):
        """Save settings to JSON file specified in ``self._filepath``.

        If you're using this class via :attr:`Workflow.settings`, which
        you probably are, ``self._filepath`` will be ``settings.json``
        in your workflow's data directory (see :attr:`~Workflow.datadir`).
        """
        if self._nosave:
            return
        data = {}
        data.update(self)
        # for key, value in self.items():
        #     data[key] = value
        with LockFile(self._filepath):
            with atomic_writer(self._filepath, 'wb') as file_obj:
                json.dump(data, file_obj, sort_keys=True, indent=2,
                          encoding='utf-8')

    # dict methods
Project: abusehelper    Author: Exploit-install    | Project source | File source
def run(self):
        state = self._get(None)
        self._put(None, None)

        try:
            state = yield self.errors | self.kill_sessions() | self.main(state)
        except Stop:
            state = None
        finally:
            self._put(None, state)

            if self.file is not None:
                self.file.seek(0)
                self.file.truncate(0)
                pickle.dump(self.state, self.file, pickle.HIGHEST_PROTOCOL)

                self.file.flush()
                unlock_file(self.file)
                self.file.close()
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def save_params(self, weights_file, catched=False):
        """Save the model's parameters."""
        f_dump = open(weights_file, "wb")  # binary mode for the binary pickle protocol
        params_vls = []
        if catched:
            if self.catched_params != []:
                params_vls = self.catched_params
            else:
                raise ValueError(
                    "You asked to save catched params," +
                    "but you didn't catch any!!!!!!!")
        else:
            for param in self.params:
                params_vls.append(param.get_value())
        pkl.dump(params_vls, f_dump, protocol=pkl.HIGHEST_PROTOCOL)
        f_dump.close()
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def save_params(self, weights_file, catched=False):
        """Save the model's params."""
        with open(weights_file, "wb") as f:  # binary mode for the binary pickle protocol
            if catched:
                if self.catched_params != []:
                    params_vl = self.catched_params
                else:
                    raise ValueError(
                        "You asked to save catched params," +
                        "but you didn't catch any!!!!!!!")
            else:
                params_vl = [param.get_value() for param in self.params]
            ft_extractor = False
            if self.ft_extractor is not None:
                ft_extractor = True
            stuff = {"layers_infos": self.layers_infos,
                     "params_vl": params_vl,
                     "tag": self.tag,
                     "dropout": self.dropout,
                     "ft_extractor": ft_extractor}
            pkl.dump(stuff, f, protocol=pkl.HIGHEST_PROTOCOL)
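A reader for this format just unpickles the single dictionary and indexes it by the same keys. A hedged sketch; the function name and the keys returned are illustrative:

import cPickle as pkl

def load_params_sketch(weights_file):
    # Inverse of save_params(): the file holds one dict whose keys
    # mirror the 'stuff' dictionary written above.
    with open(weights_file, "rb") as f:
        stuff = pkl.load(f)
    return stuff["layers_infos"], stuff["params_vl"]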
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def save_params(self, weights_file, catched=False):
        """Save the model's params."""
        with open(weights_file, "wb") as f:  # binary mode for the binary pickle protocol
            if catched:
                if self.catched_params != []:
                    params_vl = self.catched_params
                else:
                    raise ValueError(
                        "You asked to save catched params," +
                        "but you didn't catch any!!!!!!!")
            else:
                params_vl = [param.get_value() for param in self.params]
            ft_extractor = False
            if self.ft_extractor is not None:
                ft_extractor = True
            stuff = {"layers_infos": self.layers_infos,
                     "params_vl": params_vl,
                     "tag": self.tag,
                     "dropout": self.dropout,
                     "ft_extractor": ft_extractor,
                     "dic_keys": self.dic_keys,
                     "config_arch": self.config_arch,
                     "crop_size": self.crop_size}
            pkl.dump(stuff, f, protocol=pkl.HIGHEST_PROTOCOL)
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def set_params(mo, bparams):
    i = 0
    for la in mo.layers:
        we = bparams[i:i+2]
        print len(we)
        la.set_weights(we)
        i += 2
    return mo

#with open("best_model_keras.pkl", 'r') as f:
#    b_params = pkl.load(f)
#
#model = set_params(model, b_params)
#out = model.predict(xvl, batch_size=xvl.shape[0], verbose=0)
#error = np.mean(np.mean(np.power(out - yvl, 2), axis=1))
#print "Error vl", error
#sys.exit()

#init_p = get_params(model)
#with open("init_keras_param.pkl", 'w') as f:
#    pkl.dump(init_p, f)
Project: alphy    Author: maximepeschard    | Project source | File source
def register(self, name, serializer):
        """Register ``serializer`` object under ``name``.

        Raises :class:`AttributeError` if ``serializer`` is invalid.

        .. note::

            ``name`` will be used as the file extension of the saved files.

        :param name: Name to register ``serializer`` under
        :type name: ``unicode`` or ``str``
        :param serializer: object with ``load()`` and ``dump()``
            methods

        """

        # Basic validation
        getattr(serializer, 'load')
        getattr(serializer, 'dump')

        self._serializers[name] = serializer
Project: alphy    Author: maximepeschard    | Project source | File source
def dump(cls, obj, file_obj):
        """Serialize object ``obj`` to open pickle file.

        .. versionadded:: 1.8

        :param obj: Python object to serialize
        :type obj: Python object
        :param file_obj: file handle
        :type file_obj: ``file`` object

        """

        return pickle.dump(obj, file_obj, protocol=-1)


# Set up default manager and register built-in serializers
Project: alphy    Author: maximepeschard    | Project source | File source
def save(self):
        """Save settings to JSON file specified in ``self._filepath``

        If you're using this class via :attr:`Workflow.settings`, which
        you probably are, ``self._filepath`` will be ``settings.json``
        in your workflow's data directory (see :attr:`~Workflow.datadir`).
        """
        if self._nosave:
            return
        data = {}
        data.update(self)
        # for key, value in self.items():
        #     data[key] = value
        with LockFile(self._filepath):
            with atomic_writer(self._filepath, 'wb') as file_obj:
                json.dump(data, file_obj, sort_keys=True, indent=2,
                          encoding='utf-8')

    # dict methods
Project: fast-rcnn-distillation    Author: xiaolonw    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_nyud2_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: fast-rcnn-distillation    Author: xiaolonw    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: faster-rcnn-resnet    Author: Eniac-Xie    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_pascal_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb
Project: faster-rcnn-resnet    Author: Eniac-Xie    | Project source | File source
def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_coco_annotation(index)
                    for index in self._image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)
        return gt_roidb
Project: faster-rcnn-resnet    Author: Eniac-Xie    | Project source | File source
def _write_coco_results_file(self, all_boxes, res_file):
        # [{"image_id": 42,
        #   "category_id": 18,
        #   "bbox": [258.15,41.29,348.26,243.78],
        #   "score": 0.236}, ...]
        results = []
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
                                                          self.num_classes - 1)
            coco_cat_id = self._class_to_coco_cat_id[cls]
            results.extend(self._coco_results_one_category(all_boxes[cls_ind],
                                                           coco_cat_id))
        print 'Writing results json to {}'.format(res_file)
        with open(res_file, 'w') as fid:
            json.dump(results, fid)
Project: zanph    Author: zanph    | Project source | File source
def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = int(time() + self.default_timeout)
        elif timeout != 0:
            timeout = int(time() + timeout)
        filename = self._get_filename(key)
        self._prune()
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            return True
Project: enigma2    Author: OpenLD    | Project source | File source
def saveLocalSettings(self):
        if not config.movielist.settings_per_directory.value:
            return
        try:
            path = os.path.join(config.movielist.last_videodir.value, ".e2settings.pkl")
            file = open(path, "wb")
            pickle.dump(self.settings, file)
            file.close()
        except Exception, e:
            print "[MovieSelection] Failed to save settings to %s: %s" % (path, e)
        # Also set config items, in case the user has a read-only disk
        config.movielist.moviesort.value = self.settings["moviesort"]
        config.movielist.description.value = self.settings["description"]
        config.usage.on_movie_eof.value = self.settings["movieoff"]
        # save moviesort and movieeof values for use by hotkeys
#       config.movielist.moviesort.save()
        config.usage.on_movie_eof.save()
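The settings written above would be read back when entering a directory. A hedged sketch of the inverse operation; the method name and the empty-dict fallback are assumptions, not taken from enigma2:

def loadLocalSettings(self):
    # Read the per-directory settings pickle written by saveLocalSettings();
    # fall back to an empty dict if the file is missing or unreadable.
    path = os.path.join(config.movielist.last_videodir.value, ".e2settings.pkl")
    try:
        f = open(path, "rb")
        try:
            return pickle.load(f)
        finally:
            f.close()
    except Exception:
        return {}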
Project: deep-summarization    Author: harpribot    | Project source | File source
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, for both forward and backward, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X_fwd = self.X_fwd[sample_id]
        self.X_bwd = self.X_bwd[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # Forward review
        self.X_trn_fwd = self.X_fwd[0:self.train_size]
        self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
        # Backward review
        self.X_trn_bwd = self.X_bwd[0:self.train_size]
        self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
Project: deep-summarization    Author: harpribot    | Project source | File source
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X = self.X[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # review
        self.X_trn = self.X[0:self.train_size]
        self.X_tst = self.X[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
Project: deep-summarization    Author: harpribot    | Project source | File source
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """

        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X = self.X[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # review
        self.X_trn = self.X[0:self.train_size]
        self.X_tst = self.X[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
Project: deep-summarization    Author: harpribot    | Project source | File source
def _split_train_tst(self):
        """
        divide the data into training and testing data
        Create the X_trn, X_tst, for both forward and backward, and Y_trn and Y_tst
        Note that only the reviews are changed, and not the summary.

        :return: None
        """
        num_samples = self.Y.shape[0]
        mapper_file = self.checkpointer.get_mapper_file_location()
        if not self.checkpointer.is_mapper_checkpointed():
            print 'No mapper checkpoint found. Fresh loading in progress ...'
            # Now shuffle the data
            sample_id = range(num_samples)
            random.shuffle(sample_id)
            print 'Dumping the mapper shuffle for reuse.'
            Pickle.dump(sample_id, open(mapper_file, 'wb'))
            print 'Dump complete. Moving Forward...'
        else:
            print 'Mapper Checkpoint found... Reading from mapper dump'
            sample_id = Pickle.load(open(mapper_file, 'rb'))
            print 'Mapping unpickling complete.. Moving forward...'

        self.X_fwd = self.X_fwd[sample_id]
        self.X_bwd = self.X_bwd[sample_id]
        self.Y = self.Y[sample_id]
        # Now divide the data into test and train sets
        test_fraction = 0.01
        self.test_size = int(test_fraction * num_samples)
        self.train_size = num_samples - self.test_size
        # Forward review
        self.X_trn_fwd = self.X_fwd[0:self.train_size]
        self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]
        # Backward review
        self.X_trn_bwd = self.X_bwd[0:self.train_size]
        self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]
        # Summary
        self.Y_trn = self.Y[0:self.train_size]
        self.Y_tst = self.Y[self.train_size:num_samples]
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def save(self, path):
        if not os.path.exists(path): os.makedirs(path)
        self.src_vocab.save(path+"/vocab.src")
        self.tgt_vocab.save(path+"/vocab.tgt")
        self.m.save(path+"/params")
        with open(path+"/args", "w") as f: pickle.dump(self.args, f)
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def save(self, filename):
        info_dict = {
            "tokens":self.tokens,
            "strings":self.strings,
            "s2t":dict(self.s2t),
            "i2t":dict(self.i2t),
            "unk":self.unk,
            "START_TOK":self.START_TOK,
            "END_TOK":self.END_TOK
        }
        with open(filename, "w") as f: pickle.dump(info_dict, f)