Python mmap module: mmap() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use mmap.mmap().
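
Before the project examples, here is a minimal Python 3 sketch (not taken from any of the projects below) of the most common pattern: mapping an existing, non-empty file read-only and searching it without loading it into memory. The file name is only an illustration.

import mmap

# Minimal sketch: a length of 0 maps the whole file; the file must not be empty.
with open("example.bin", "rb") as f:
    with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
        print(m.size(), m.find(b"\x00"))
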

Project: kinect-2-libras    Author: inessadl    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
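
This `_malloc` appears to be a vendored copy of the arena allocator in CPython's `multiprocessing.heap`: new arenas are anonymous mappings whose length is rounded up to a multiple of `mmap.PAGESIZE`. A hedged sketch of that rounding and arena creation (the helper name is illustrative):

import mmap

def roundup(n, alignment=mmap.PAGESIZE):
    # Round n up to the next multiple of alignment, as the heap does for arena sizes.
    r = n % alignment
    return n if r == 0 else n + alignment - r

arena_buf = mmap.mmap(-1, roundup(5000))  # anonymous mapping, one page-aligned arena
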
Project: jx-sqlite    Author: mozilla    | Project source | File source
def __init__(self, stream, length, _shared=None):
        """
        :param stream:  THE STREAM WE WILL GET THE BYTES FROM
        :param length:  THE MAX NUMBER OF BYTES WE ARE EXPECTING
        :param _shared: FOR INTERNAL USE TO SHARE THE BUFFER
        :return:
        """
        self.position = 0
        file_ = TemporaryFile()
        if not _shared:
            self.shared = Data(
                length=length,
                locker=Lock(),
                stream=stream,
                done_read=0,
                file=file_,
                buffer=mmap(file_.fileno(), length)
            )
        else:
            self.shared = _shared

        self.shared.ref_count += 1
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def remoteSceneChanged(self, data):
        w, h, size, newfile = data
        #self._sizeHint = (whint, hhint)
        if self.shm is None or self.shm.size != size:
            if self.shm is not None:
                self.shm.close()
            if sys.platform.startswith('win'):
                self.shmtag = newfile   ## on windows, we create a new tag for every resize
                self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.
            elif sys.platform == 'darwin':
                self.shmFile.close()
                self.shmFile = open(self._view.shmFileName(), 'r')
                self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
            else:
                self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
        self.shm.seek(0)
        data = self.shm.read(w*h*4)
        self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)
        self._img.data = data  # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.
        self.update()
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __init__(self, *args, **kwds):
        ## Create shared memory for rendered image
        #pg.dbg(namespace={'r': self})
        if sys.platform.startswith('win'):
            self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
            self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows
        else:
            self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
            self.shmFile.write(b'\x00' * (mmap.PAGESIZE+1))
            fd = self.shmFile.fileno()
            self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
        atexit.register(self.close)

        GraphicsView.__init__(self, *args, **kwds)
        self.scene().changed.connect(self.update)
        self.img = None
        self.renderTimer = QtCore.QTimer()
        self.renderTimer.timeout.connect(self.renderView)
        self.renderTimer.start(16)
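
The constructor above shows pyqtgraph's cross-platform shared-memory setup: an anonymous mapping identified by a tag name on Windows, and a temp-file-backed MAP_SHARED mapping elsewhere. A condensed Python 3 sketch of the same idea (the tag name and size are illustrative, not from the project):

import mmap
import sys
import tempfile

if sys.platform.startswith('win'):
    # The tag names the mapping so another process can open the same region.
    shm = mmap.mmap(-1, mmap.PAGESIZE, "example_shmem_tag")
else:
    shm_file = tempfile.NamedTemporaryFile()
    shm_file.write(b'\x00' * mmap.PAGESIZE)
    shm_file.flush()  # the file must really be this large before it is mapped
    shm = mmap.mmap(shm_file.fileno(), mmap.PAGESIZE,
                    mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE)
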
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def remoteSceneChanged(self, data):
        w, h, size, newfile = data
        #self._sizeHint = (whint, hhint)
        if self.shm is None or self.shm.size != size:
            if self.shm is not None:
                self.shm.close()
            if sys.platform.startswith('win'):
                self.shmtag = newfile   ## on windows, we create a new tag for every resize
                self.shm = mmap.mmap(-1, size, self.shmtag) ## can't use tmpfile on windows because the file can only be opened once.
            elif sys.platform == 'darwin':
                self.shmFile.close()
                self.shmFile = open(self._view.shmFileName(), 'r')
                self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
            else:
                self.shm = mmap.mmap(self.shmFile.fileno(), size, mmap.MAP_SHARED, mmap.PROT_READ)
        self.shm.seek(0)
        data = self.shm.read(w*h*4)
        self._img = QtGui.QImage(data, w, h, QtGui.QImage.Format_ARGB32)
        self._img.data = data  # data must be kept alive or PySide 1.2.1 (and probably earlier) will crash.
        self.update()
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __init__(self, *args, **kwds):
        ## Create shared memory for rendered image
        #pg.dbg(namespace={'r': self})
        if sys.platform.startswith('win'):
            self.shmtag = "pyqtgraph_shmem_" + ''.join([chr((random.getrandbits(20)%25) + 97) for i in range(20)])
            self.shm = mmap.mmap(-1, mmap.PAGESIZE, self.shmtag) # use anonymous mmap on windows
        else:
            self.shmFile = tempfile.NamedTemporaryFile(prefix='pyqtgraph_shmem_')
            self.shmFile.write(b'\x00' * (mmap.PAGESIZE+1))
            fd = self.shmFile.fileno()
            self.shm = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
        atexit.register(self.close)

        GraphicsView.__init__(self, *args, **kwds)
        self.scene().changed.connect(self.update)
        self.img = None
        self.renderTimer = QtCore.QTimer()
        self.renderTimer.timeout.connect(self.renderView)
        self.renderTimer.start(16)
Project: kAFL    Author: RUB-SysSec    | Project source | File source
def wipe(self):
        filter_bitmap_fd = os.open("/dev/shm/kafl_filter0", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        os.ftruncate(filter_bitmap_fd, self.config.config_values['BITMAP_SHM_SIZE'])
        filter_bitmap = mmap.mmap(filter_bitmap_fd, self.config.config_values['BITMAP_SHM_SIZE'], mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        for i in range(self.config.config_values['BITMAP_SHM_SIZE']):
            filter_bitmap[i] = '\x00'
        filter_bitmap.close()
        os.close(filter_bitmap_fd)

        filter_bitmap_fd = os.open("/dev/shm/kafl_tfilter", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        os.ftruncate(filter_bitmap_fd, 0x1000000)
        filter_bitmap = mmap.mmap(filter_bitmap_fd, 0x1000000, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        for i in range(0x1000000):
            filter_bitmap[i] = '\x00'
        filter_bitmap.close()
        os.close(filter_bitmap_fd)
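
wipe() follows the usual POSIX shared-memory recipe: open or create a file under /dev/shm, size it with os.ftruncate, map it MAP_SHARED, and overwrite it. A Python 3 sketch of the same steps (path and size are illustrative); slice assignment zeroes the region in one operation instead of a byte-by-byte loop:

import mmap
import os

size = 0x1000
fd = os.open("/dev/shm/example_bitmap", os.O_RDWR | os.O_CREAT)
os.ftruncate(fd, size)
shm = mmap.mmap(fd, size, mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE)
shm[:] = b'\x00' * size   # zero the whole shared region at once
shm.close()
os.close(fd)
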
Project: kAFL    Author: RUB-SysSec    | Project source | File source
def __set_binary(self, filename, binaryfile, max_size):
        shm_fd = os.open(filename, os.O_RDWR | os.O_SYNC | os.O_CREAT)
        os.ftruncate(shm_fd, max_size)
        shm = mmap.mmap(shm_fd, max_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        shm.seek(0x0)
        shm.write('\x00' * max_size)
        shm.seek(0x0)

        f = open(binaryfile, "rb")
        bytes = f.read(1024)
        if bytes:
            shm.write(bytes)
        while bytes != "":
            bytes = f.read(1024)
            if bytes:
                shm.write(bytes)

        f.close()
        shm.close()
        os.close(shm_fd)
Project: kAFL    Author: RUB-SysSec    | Project source | File source
def init(self):
        self.control = socket.socket(socket.AF_UNIX)
        while True:
            try:
                self.control.connect(self.control_filename)
                #self.control.connect(self.control_filename)
                break
            except socket_error:
                pass
                #time.sleep(0.01)

        self.kafl_shm_f     = os.open(self.bitmap_filename, os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.fs_shm_f       = os.open(self.payload_filename, os.O_RDWR | os.O_SYNC | os.O_CREAT)
        #argv_fd             = os.open(self.argv_filename, os.O_RDWR | os.O_SYNC | os.O_CREAT)
        os.ftruncate(self.kafl_shm_f, self.bitmap_size)
        os.ftruncate(self.fs_shm_f, (128 << 10))
        #os.ftruncate(argv_fd, (4 << 10))

        self.kafl_shm       = mmap.mmap(self.kafl_shm_f, self.bitmap_size, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
        self.fs_shm         = mmap.mmap(self.fs_shm_f, (128 << 10),  mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)

        return True
Project: hostapd-mana    Author: adde88    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: BrundleFuzz    Author: carlosgprado    | Project source | File source
def _initialize_shared_memory(self):
        """
        This is the IPC channel between us (Python)
        and the PinTool (C/C++)
        """
        s_uint32 = self.utils.get_size_uint32()
        shm_name = "Local\\NaFlSharedMemory"

        self.shm_size = self.bitmap_size * s_uint32  # architecture dependent :)
        self.shm = mmap.mmap(0,
                        self.shm_size,
                        shm_name,
                        access = mmap.ACCESS_WRITE)

        if not self.shm:
            # Oops!
            self.ml.info('[!] Could not create the shared memory region')
            self.ml.info('[!] Aborting...')
            sys.exit(1)
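
On Windows, the third argument to mmap.mmap() is a tag name; two processes that map the same tag (here the Python fuzzer and a PinTool) see the same memory. A minimal Windows-only sketch with an illustrative name and size:

import mmap

SHM_NAME = "Local\\ExampleSharedMemory"   # illustrative tag, not the project's
SHM_SIZE = 64 * 1024

# A fileno of -1 means the mapping is backed by the paging file, not a real file.
shm = mmap.mmap(-1, SHM_SIZE, SHM_NAME, access=mmap.ACCESS_WRITE)
shm[0:4] = b"PING"   # visible to any process that opens the same tag
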
Project: sahara_emulator    Author: bkerler    | Project source | File source
def __init__(self, filename, block_size):
        self.filename = filename
        self.block_size = block_size

        try:
            statinfo = os.stat(self.filename)
            self.size = statinfo.st_size
            self.file = open(self.filename, 'r+b')
            self.image = mmap(self.file.fileno(), 0)
        except:
            print('''
----------------------------------------------------------------------
No disk image named '%s' was found.
You can use the disk image from umap2/data/fat32.3M.stick.img
as a small disk image (extract it using `tar xvf fat32.3M.stick.img`)
----------------------------------------------------------------------
            ''' % (filename))
            raise Exception('No file named %s found.' % (filename))
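
Here the whole disk image is mapped writable: with a length of 0 and a file opened 'r+b', the default mapping is shared, so assignments to the mmap object modify the image file in place. A short Python 3 sketch of that pattern (file name and patched bytes are illustrative):

import mmap

with open("disk.img", "r+b") as f, mmap.mmap(f.fileno(), 0) as image:
    image[0:2] = b"\xeb\x3c"   # patch two bytes directly in the backing file
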
Project: zippy    Author: securesystemslab    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: zippy    Author: securesystemslab    | Project source | File source
def malloc(self, size):
        # return a block of right size (possibly rounded up)
        assert 0 <= size < sys.maxsize
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        self._lock.acquire()
        self._free_pending_blocks()
        try:
            size = self._roundup(max(size,1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block
        finally:
            self._lock.release()

#
# Class representing a chunk of an mmap -- can be inherited
#
Project: ipfirst    Author: cutd    | Project source | File source
def __init__(self,file_name):       
        try:
            path = os.path.abspath(file_name)
            self._handle= open(path, "rb")
            if mmap is not None:
                self.data = mmap.mmap(self._handle.fileno(), 0, access=mmap.ACCESS_READ)
            else:
                self.data = self._handle.read()

            self.dict={}       
            self.start =self.int_from_4byte(0)
            index_last_offset = self.int_from_4byte(4)
            prefix_start_offset = self.int_from_4byte(8) 
            prefix_end_offset = self.int_from_4byte(12)           
            i=prefix_start_offset
            while i <= prefix_end_offset:                 
                prefix =self.int_from_1byte(i)               
                map_dict={ 'prefix':prefix , 'start_index':self.int_from_4byte(i+1), 'end_index':self.int_from_4byte(i+5) }
                self.dict[prefix]=map_dict
                i+=9

        except Exception as ex:
            print "cannot open file %s" % file
            print ex.message
            exit(0)
Project: Windows-Prefetch-Carver    Author: PoorBillionaire    | Project source | File source
def main():
    p = ArgumentParser()
    p.add_argument('-f', '--file', help='Carve Prefetch files from the given file', required=True)
    p.add_argument('-o', '--outfile', help='Write results to the given file', required=True)
    p.add_argument('-c', '--csv', help='Output results in csv format', action='store_true')
    p.add_argument('-m', '--mactime', help='Output results in mactime format', action='store_true')
    p.add_argument('-t', '--tln', help='Output results in tln format', action='store_true')
    p.add_argument('-s', '--system', help='System name (use with -t)')

    args = p.parse_args()

    with open(args.file, 'rb') as i:
        with contextlib.closing(mmap.mmap(i.fileno(), 0 , access=mmap.ACCESS_READ)) as m:
            with open(args.outfile, 'wb') as o:
                if args.tln:
                    prefetchCarve(m, o, "tln", system_name=args.system)
                elif args.csv:
                    o.write(u'last_run_time,prefetch_file_name,run_count\n')
                    prefetchCarve(m, o, output_type="csv")
                elif args.mactime:
                    prefetchCarve(m, o, output_type="mactime")
                else:
                    prefetchCarve(m, o)
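
contextlib.closing() guarantees the mapping is unmapped when the with block exits; it is needed on Python 2, where mmap objects are not context managers. On Python 3 the mapping can be used with `with` directly, roughly as in this sketch (file name illustrative):

import mmap

with open("carve_me.raw", "rb") as i, \
        mmap.mmap(i.fileno(), 0, access=mmap.ACCESS_READ) as m:
    header = m[:8]   # the mapping is closed automatically on exit
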
Project: build    Author: fuchsia-mirror    | Project source | File source
def mmapper(filename):
    """A context manager that yields (fd, file_contents) given a file name.
This ensures that the mmap and file objects are closed at the end of the
'with' statement."""
    fileobj = open(filename, 'rb')
    fd = fileobj.fileno()
    if os.fstat(fd).st_size == 0:
        # mmap can't handle empty files.
        try:
            yield fd, ''
        finally:
            fileobj.close()
    else:
        mmapobj = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
        try:
            yield fd, mmapobj
        finally:
            mmapobj.close()
            fileobj.close()


# elf_info objects are only created by `get_elf_info` or the `copy` or
# `rename` methods.
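
mmapper() above is written as a generator, presumably decorated with contextlib.contextmanager elsewhere in the file; the empty-file branch exists because mmap raises ValueError for zero-length files. A self-contained Python 3 sketch of the same idea, with an illustrative file name:

import contextlib
import mmap
import os

@contextlib.contextmanager
def mapped(filename):
    # Same idea as mmapper(): mmap refuses empty files (ValueError),
    # so fall back to an empty bytes object for them.
    with open(filename, 'rb') as fileobj:
        fd = fileobj.fileno()
        if os.fstat(fd).st_size == 0:
            yield fd, b''
        else:
            with mmap.mmap(fd, 0, access=mmap.ACCESS_READ) as m:
                yield fd, m

with mapped("some_binary") as (fd, contents):
    print(len(contents))
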
Project: oil    Author: oilshell    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: python2-tracer    Author: extremecoders-re    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: Cayenne-Agent    Author: myDevicesIoT    | Project source | File source
def __init__(self):
        if not NativeGPIO.instance:
            GPIOPort.__init__(self, 54)
            self.export = range(54)
            self.post_value = True
            self.post_function = True
            self.gpio_setup = []
            self.gpio_reset = []
            self.valueFile = [0 for i in range(54)]
            self.functionFile = [0 for i in range(54)]
            for i in range(54):
                # Export the pins here to prevent a delay when accessing the values for the 
                # first time while waiting for the file group to be set
                self.__checkFilesystemExport__(i)
            try:
                with open('/dev/gpiomem', 'rb') as gpiomem:
                    self.gpio_map = mmap.mmap(gpiomem.fileno(), BLOCK_SIZE, prot=mmap.PROT_READ)
            except OSError as err:
                error(err)
            NativeGPIO.instance = self
Project: meter    Author: qianqians    | Project source | File source
def unload_sheet(self, sheet_name_or_index):
        if isinstance(sheet_name_or_index, int):
            sheetx = sheet_name_or_index
        else:
            try:
                sheetx = self._sheet_names.index(sheet_name_or_index)
            except ValueError:
                raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
        self._sheet_list[sheetx] = None

    ##
    # This method has a dual purpose. You can call it to release
    # memory-consuming objects and (possibly) a memory-mapped file
    # (mmap.mmap object) when you have finished loading sheets in
    # on_demand mode, but still require the Book object to examine the
    # loaded sheets. It is also called automatically (a) when open_workbook
    # raises an exception and (b) if you are using a "with" statement, when 
    # the "with" block is exited. Calling this method multiple times on the 
    # same object has no ill effect.
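
The comment describes xlrd's on_demand workflow, in which the workbook may be backed by an mmap.mmap object until it is released. A hedged usage sketch of that workflow (file name illustrative):

import xlrd

book = xlrd.open_workbook("big.xls", on_demand=True)
for idx in range(book.nsheets):
    sheet = book.sheet_by_index(idx)   # loaded lazily
    # ... process sheet ...
    book.unload_sheet(idx)             # release the sheet's memory
book.release_resources()               # closes the memory-mapped file, if any
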
Project: OneClickDTU    Author: satwikkansal    | Project source | File source
def unload_sheet(self, sheet_name_or_index):
        if isinstance(sheet_name_or_index, int):
            sheetx = sheet_name_or_index
        else:
            try:
                sheetx = self._sheet_names.index(sheet_name_or_index)
            except ValueError:
                raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
        self._sheet_list[sheetx] = None

    ##
    # This method has a dual purpose. You can call it to release
    # memory-consuming objects and (possibly) a memory-mapped file
    # (mmap.mmap object) when you have finished loading sheets in
    # on_demand mode, but still require the Book object to examine the
    # loaded sheets. It is also called automatically (a) when open_workbook
    # raises an exception and (b) if you are using a "with" statement, when 
    # the "with" block is exited. Calling this method multiple times on the 
    # same object has no ill effect.
Project: fusion360-dxf-export    Author: opendesk    | Project source | File source
def unload_sheet(self, sheet_name_or_index):
        if isinstance(sheet_name_or_index, int):
            sheetx = sheet_name_or_index
        else:
            try:
                sheetx = self._sheet_names.index(sheet_name_or_index)
            except ValueError:
                raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
        self._sheet_list[sheetx] = None

    ##
    # This method has a dual purpose. You can call it to release
    # memory-consuming objects and (possibly) a memory-mapped file
    # (mmap.mmap object) when you have finished loading sheets in
    # on_demand mode, but still require the Book object to examine the
    # loaded sheets. It is also called automatically (a) when open_workbook
    # raises an exception and (b) if you are using a "with" statement, when 
    # the "with" block is exited. Calling this method multiple times on the 
    # same object has no ill effect.
Project: find_circ2    Author: rajewsky-lab    | Project source | File source
def __init__(self,path,chrom,sense,system='hg19',**kwargs):
        super(GenomeAccessor,self).__init__(path,chrom,sense,system=system,**kwargs)
        self.logger = logging.getLogger("GenomeAccessor")
        self.logger.info("# mmap: Loading genomic sequence for chromosome %s from '%s'" % (chrom,path))

        self.system = system
        self.data = None
        fname = os.path.join(path)
        try:
            self.data = indexed_fasta(fname)
        except IOError:
            self.logger.warning("Could not access '%s'. Switching to dummy mode (only Ns)" % fname)

            self.get_data = self.get_dummy
            self.get_oriented = self.get_dummy
            self.covered_strands = [chrom+'+',chrom+'-']
        else:
            # register for all chroms/strands
            self.covered_strands = [chrom+'+' for chrom in self.data.chrom_stats.keys()] + [chrom+'-' for chrom in self.data.chrom_stats.keys()]

        # TODO: maybe remove this if not needed
        self.get = self.get_oriented
Project: find_circ    Author: rajewsky-lab    | Project source | File source
def __init__(self,path,chrom,sense,system='hg19',**kwargs):
        super(GenomeAccessor,self).__init__(path,chrom,sense,system=system,**kwargs)
        debug("# GenomeAccessor mmap: Loading genomic sequence for chromosome %s from '%s'" % (chrom,path))

        self.system = system
        self.data = None
        fname = os.path.join(path)
        try:
            self.data = indexed_fasta(fname)
        except IOError:
            warning("Could not access '%s'. Switching to dummy mode (only Ns)" % fname)

            self.get_data = self.get_dummy
            self.get_oriented = self.get_dummy
            self.covered_strands = [chrom+'+',chrom+'-']
        else:
            # register for all chroms/strands
            self.covered_strands = [chrom+'+' for chrom in self.data.chrom_stats.keys()] + [chrom+'-' for chrom in self.data.chrom_stats.keys()]

        # TODO: maybe remove this if not needed
        self.get = self.get_oriented
Project: rom-info    Author: drx    | Project source | File source
def __enter__(self):
        self.file.__enter__()
        try:
            self.mmap = mmap.mmap(self.file.fileno(), 0, access=self.mmap_access)
        except ValueError as e:
            if 'mmap length' in str(e) and sys.maxsize < 2**32:
                raise SystemExit('Could not open {}.\n\n'
                                 'This is an issue with 32-bit mmap.'
                                 ' Please install 64-bit Python to handle files this big.'.format(self.file_name))
            else:
                raise SystemExit('Could not open {}: {}'.format(self.file_name, e))
        except OSError as e:
            if getattr(e, 'winerror', None) == 8 and sys.maxsize < 2**32:
                raise SystemExit('Could not open {}.\n\n'
                                 'This is an issue with 32-bit Python/Windows and mmap.'
                                 ' Please install 64-bit Python to handle files this big.'.format(self.file_name))
            raise

        return self
Project: cas_python_sdk    Author: tencentyun    | Project source | File source
def compute_etag_from_file_obj(file_obj, offset=0, size=None, chunk_size=1024 * 1024):
    etag = hashlib.sha256()

    size = size or os.fstat(file_obj.fileno()).st_size - offset

    if size != 0 and offset % mmap.ALLOCATIONGRANULARITY == 0:
        target = mmap.mmap(file_obj.fileno(), length=size,
                           offset=offset,
                           access=mmap.ACCESS_READ)
    else:
        target = file_obj
        target.seek(offset)

    while size > 0:
        data = target.read(chunk_size)
        etag.update(data[:min(len(data), size)])
        size -= len(data)

    if target is file_obj:
        file_obj.seek(offset)
    else:
        target.close()
    s = etag.hexdigest()
    return s
Project: cas_python_sdk    Author: tencentyun    | Project source | File source
def compute_tree_etag_from_file_obj(file_obj, offset=0, size=None,
                                    chunk_size=1024 * 1024):
    generator = TreeHashGenerator()

    size = size or os.fstat(file_obj.fileno()).st_size - offset
    if size != 0 and offset % mmap.ALLOCATIONGRANULARITY == 0:
        target = mmap.mmap(file_obj.fileno(), length=size,
                           offset=offset,
                           access=mmap.ACCESS_READ)
    else:
        target = file_obj
        target.seek(offset)

    while size > 0:
        data = target.read(chunk_size)
        generator.update(data[:min(len(data), size)])
        size -= len(data)

    if target is file_obj:
        file_obj.seek(offset)
    else:
        target.close()
    return generator.generate().digest()
Project: cas_python_sdk    Author: tencentyun    | Project source | File source
def compute_hash_from_file_obj(file_obj, offset=0, size=None, chunk_size=1024 * 1024):
    etag = hashlib.sha256()
    generator = TreeHashGenerator()

    size = size or os.fstat(file_obj.fileno()).st_size - offset

    if size != 0 and offset % mmap.ALLOCATIONGRANULARITY == 0:
        target = mmap.mmap(file_obj.fileno(), length=size,
                           offset=offset,
                           access=mmap.ACCESS_READ)
    else:
        target = file_obj
        target.seek(offset)

    while size > 0:
        data = target.read(chunk_size)
        generator.update(data[:min(len(data), size)])
        etag.update(data[:min(len(data), size)])
        size -= len(data)

    if target is file_obj:
        file_obj.seek(offset)
    else:
        target.close()
    return etag.hexdigest(), generator.generate().digest()
Project: transvar    Author: zwdzwd    | Project source | File source
def __init__(self, fasta_file):
        self.faidx = {}

        self.fasta_file=fasta_file

        try:
            self.fasta_fd = open(fasta_file)
            self.fasta_handle = mmap.mmap(self.fasta_fd.fileno(), 0, access=mmap.ACCESS_READ)
        except IOError:
            print("Reference sequence doesn't exist")

        try:
            self.faidx_handle=open(fasta_file+".fai")
        except IOError:
            print("samtools faidx file doesn't exist for reference")
        self.load_faidx()

    # Function to cache fasta index in dictionary
    # faidx format contains the following columns:
    ##.the name of the sequence
    ##.the length of the sequence
    ##.the offset of the first base in the file
    ##.the number of bases in each fasta line
    ##.the number of bytes in each fasta line
Project: InternationalizationScript-iOS    Author: alexfeng    | Project source | File source
def unload_sheet(self, sheet_name_or_index):
        if isinstance(sheet_name_or_index, int):
            sheetx = sheet_name_or_index
        else:
            try:
                sheetx = self._sheet_names.index(sheet_name_or_index)
            except ValueError:
                raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
        self._sheet_list[sheetx] = None

    ##
    # This method has a dual purpose. You can call it to release
    # memory-consuming objects and (possibly) a memory-mapped file
    # (mmap.mmap object) when you have finished loading sheets in
    # on_demand mode, but still require the Book object to examine the
    # loaded sheets. It is also called automatically (a) when open_workbook
    # raises an exception and (b) if you are using a "with" statement, when 
    # the "with" block is exited. Calling this method multiple times on the 
    # same object has no ill effect.
Project: InternationalizationScript-iOS    Author: alexfeng    | Project source | File source
def unload_sheet(self, sheet_name_or_index):
        if isinstance(sheet_name_or_index, int):
            sheetx = sheet_name_or_index
        else:
            try:
                sheetx = self._sheet_names.index(sheet_name_or_index)
            except ValueError:
                raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
        self._sheet_list[sheetx] = None

    ##
    # This method has a dual purpose. You can call it to release
    # memory-consuming objects and (possibly) a memory-mapped file
    # (mmap.mmap object) when you have finished loading sheets in
    # on_demand mode, but still require the Book object to examine the
    # loaded sheets. It is also called automatically (a) when open_workbook
    # raises an exception and (b) if you are using a "with" statement, when 
    # the "with" block is exited. Calling this method multiple times on the 
    # same object has no ill effect.
Project: ouroboros    Author: pybee    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            util.info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: ndk-python    Author: gittor    | Project source | File source
def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block
Project: supervisoradmin    Author: jimmy201602    | Project source | File source
def get(self,request):
        n=12
        try:
            size = os.path.getsize(ACTIVITY_LOG)
            with open(ACTIVITY_LOG, "rb") as f:
                # for Windows the mmap parameters are different
                fm = mmap.mmap(f.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
            for i in xrange(size - 1, -1, -1):
                if fm[i] == '\n':
                    n -= 1
                    if n == -1:
                        break
                lines = fm[i + 1 if i else 0:].splitlines()
            return JsonResponse({'status': "success", 'log': lines})
        except Exception as err:
            return JsonResponse({'status' : "error",'messagge' : err})
        finally:
            try:
                fm.close()
            except (UnboundLocalError, TypeError):
                return JsonResponse({'status':"error", 'message': "Activity log file is empty"})



#index
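
The view above returns the last lines of a log by walking the mapping backwards byte by byte. A Python 3 sketch of the same tail-style read using rfind(), which still avoids reading the whole file (path and line count are illustrative):

import mmap

def tail(path, n=12):
    with open(path, "rb") as f, \
            mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
        end = m.size()
        if end and m[end - 1:end] == b"\n":   # ignore a trailing newline
            end -= 1
        pos = end
        for _ in range(n):
            pos = m.rfind(b"\n", 0, pos)
            if pos < 0:
                break                          # fewer than n lines: return them all
        return m[pos + 1:].decode(errors="replace").splitlines()
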
Project: SmartVHDL    Author: TheClams    | Project source | File source
def get_list_file(self, projname, callback=None):
        global list_module_files
        global lmf_update_ongoing
        lmf_update_ongoing = True
        lmf = []
        for folder in sublime.active_window().folders():
            for root, dirs, files in os.walk(folder):
                for fn in files:
                    if fn.lower().endswith(('.vhd','.vho','.vhdl')):
                        ffn = os.path.join(root,fn)
                        f = open(ffn)
                        if os.stat(ffn).st_size:
                            s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
                            if s.find(b'entity') != -1:
                                lmf.append(ffn)
                            elif s.find(b'component') != -1:
                                lmf.append(ffn)
        sublime.status_message('List of module files updated')
        list_module_files[projname] = lmf[:]
        lmf_update_ongoing = False
        if callback:
            callback()
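
The plugin checks each VHDL file for an 'entity' or 'component' keyword by mapping it and calling find(), skipping empty files because they cannot be mapped. A small Python 3 sketch of that test (names illustrative):

import mmap
import os

def contains(path, needle):
    if os.stat(path).st_size == 0:
        return False   # mmap raises ValueError on empty files
    with open(path, "rb") as f, \
            mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
        return m.find(needle) != -1

# e.g. contains("design.vhd", b"entity")
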
Project: Cortex-Analyzers    Author: CERT-BDF    | Project source | File source
def __init__(self, database):
        """Reader for the MaxMind DB file format

        Arguments:
        database -- A path to a valid MaxMind DB file such as a GeoIP2
                    database file.
        """
        with open(database, 'rb') as db_file:
            self._buffer = mmap.mmap(
                db_file.fileno(), 0, access=mmap.ACCESS_READ)

        metadata_start = self._buffer.rfind(self._METADATA_START_MARKER,
                                            self._buffer.size() - 128 * 1024)

        if metadata_start == -1:
            raise InvalidDatabaseError('Error opening database file ({0}). '
                                       'Is this a valid MaxMind DB file?'
                                       ''.format(database))

        metadata_start += len(self._METADATA_START_MARKER)
        metadata_decoder = Decoder(self._buffer, metadata_start)
        (metadata, _) = metadata_decoder.decode(metadata_start)
        self._metadata = Metadata(**metadata)  # pylint: disable=star-args

        self._decoder = Decoder(self._buffer, self._metadata.search_tree_size
                                + self._DATA_SECTION_SEPARATOR_SIZE)
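
The MaxMind reader maps the whole database read-only and uses rfind() to locate the metadata marker within the last 128 KiB, so even a large .mmdb file is never read fully into memory. A hedged sketch of that search; the marker value and file name are assumptions for illustration:

import mmap

MARKER = b"\xab\xcd\xefMaxMind.com"   # assumed value of _METADATA_START_MARKER

with open("GeoLite2-City.mmdb", "rb") as db, \
        mmap.mmap(db.fileno(), 0, access=mmap.ACCESS_READ) as buf:
    start = buf.rfind(MARKER, max(0, buf.size() - 128 * 1024))
    metadata_start = start + len(MARKER) if start != -1 else None
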
Project: core-framework    Author: RedhawkSDR    | Project source | File source
def __init__(self, transfer_size):
        fd, self.filename = tempfile.mkstemp()
        os.ftruncate(fd, 20)
        self.buf = mmap.mmap(fd, 20, mmap.MAP_SHARED, mmap.PROT_WRITE)
        os.close(fd)
        self.total_bytes = ctypes.c_uint64.from_buffer(self.buf)
        self.total_bytes.value = 0
        self.average_time = ctypes.c_double.from_buffer(self.buf, 8)
        self.average_time.value = 0.0
        self.transfer_size = ctypes.c_uint32.from_buffer(self.buf, 16)
        self.transfer_size.value = transfer_size
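
ctypes.from_buffer() creates views that alias the mapping directly, so assigning to .value writes straight into the shared (here temp-file-backed) memory. A minimal sketch with an anonymous map and the same layout (values are illustrative):

import ctypes
import mmap

buf = mmap.mmap(-1, 20)                                  # anonymous, writable mapping
total_bytes = ctypes.c_uint64.from_buffer(buf)           # bytes 0..7
average_time = ctypes.c_double.from_buffer(buf, 8)       # bytes 8..15
transfer_size = ctypes.c_uint32.from_buffer(buf, 16)     # bytes 16..19
total_bytes.value = 1024
average_time.value = 0.5
transfer_size.value = 512
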
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def __init__(self, size):
            self.size = size
            self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == 0, 'tagname already in use'
            self._state = (self.size, self.name)
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def __setstate__(self, state):
            self.size, self.name = self._state = state
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def __init__(self, size):
            self.buffer = mmap.mmap(-1, size)
            self.size = size
            self.name = None

#
# Class allowing allocation of chunks of memory from arenas
#
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size
        self._lengths = []
        self._len_to_seq = {}
        self._start_to_block = {}
        self._stop_to_block = {}
        self._allocated_blocks = set()
        self._arenas = []
Project: python-xkbcommon    Author: sde1000    | Project source | File source
def keymap_new_from_file(self, file, format=lib.XKB_KEYMAP_FORMAT_TEXT_V1):
        "Create a Keymap from an open file"
        try:
            fn = file.fileno()
        except:
            fn = None
        if fn:
            load_method = "mmap_file"
            mm = mmap.mmap(fn, 0)
            buf = ffi.from_buffer(mm)
            r = lib.xkb_keymap_new_from_buffer(
                self._context, buf, mm.size(), format,
                lib.XKB_KEYMAP_COMPILE_NO_FLAGS)
            del buf
            mm.close()
        else:
            load_method = "read_file"
            keymap = file.read()
            buf = ffi.new("char[]", keymap)
            r = lib.xkb_keymap_new_from_string(
                self._context, buf, format,
                lib.XKB_KEYMAP_COMPILE_NO_FLAGS)
        if r == ffi.NULL:
            raise XKBKeymapCreationFailure(
                "xkb_keymap_new_from_buffer or xkb_keymap_new_from_string "
                "returned NULL")
        return Keymap(self, r, load_method)
Project: sharedbuffers    Author: jampp    | Project source | File source
def map_file(cls, fileobj, offset = 0, size = None):
        fileobj.seek(offset)
        total_size = cls._Header.unpack(fileobj.read(cls._Header.size))[0]
        map_start = offset - offset % mmap.ALLOCATIONGRANULARITY
        buf = mmap.mmap(fileobj.fileno(), total_size + offset - map_start, 
            access = mmap.ACCESS_READ, offset = map_start)
        rv = cls(buffer(buf, offset - map_start))
        rv._file = fileobj
        rv._mmap = buf
        return rv
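
map_file() has to align the mapping start because mmap's offset argument must be a multiple of mmap.ALLOCATIONGRANULARITY; the difference is then skipped when indexing into the buffer. A small Python 3 sketch of that alignment (helper name illustrative):

import mmap

def map_at(fileobj, offset, size):
    # The offset passed to mmap must be a multiple of ALLOCATIONGRANULARITY,
    # so align down and remember how many leading bytes to skip.
    map_start = offset - offset % mmap.ALLOCATIONGRANULARITY
    buf = mmap.mmap(fileobj.fileno(), size + (offset - map_start),
                    access=mmap.ACCESS_READ, offset=map_start)
    return buf, offset - map_start   # caller reads buf[skip:skip + size]
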
Project: sharedbuffers    Author: jampp    | Project source | File source
def map_file(cls, fileobj, offset = 0, size = None):
        map_start = offset - offset % mmap.ALLOCATIONGRANULARITY
        fileobj.seek(map_start)
        buf = mmap.mmap(fileobj.fileno(), 0, access = mmap.ACCESS_READ, offset = map_start)
        rv = cls(buf, offset - map_start)
        rv._file = fileobj
        return rv
Project: sharedbuffers    Author: jampp    | Project source | File source
def map_file(cls, fileobj, offset = 0, size = None):
        map_start = offset - offset % mmap.ALLOCATIONGRANULARITY
        fileobj.seek(map_start)
        buf = mmap.mmap(fileobj.fileno(), 0, access = mmap.ACCESS_READ, offset = map_start)
        rv = cls(buf, offset - map_start)
        rv._file = fileobj
        return rv
Project: abe-bootstrap    Author: TryCoin-Team    | Project source | File source
def map_file(self, file, start):  # Initialize with bytes from file
    self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
    self.read_cursor = start
Project: abe-bootstrap    Author: TryCoin-Team    | Project source | File source
def map_file(self, file, start):  # Initialize with bytes from file
    self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
    self.read_cursor = start
Project: kAFL    Author: RUB-SysSec    | Project source | File source
def __get_shm(self, type_id, slave_id):
        if slave_id in self.tmp_shm[type_id]:
            shm = self.tmp_shm[type_id][slave_id]
        else:
            shm_fd = os.open(self.files[type_id] + str(slave_id), os.O_RDWR | os.O_SYNC)
            shm = mmap.mmap(shm_fd, self.sizes[type_id]*self.tasks_per_requests, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
            self.tmp_shm[type_id][slave_id] = shm
        return shm