Python types module, get() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use types.get().
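The common thread in these examples is the dictionary get() method: look up a key and fall back to a default value instead of raising KeyError. A minimal standalone sketch of the pattern (the options dict below is illustrative and not taken from any of the projects):

options = {'driver': 'sqlite3', 'timeout': 30}

# plain indexing raises KeyError for a missing key:
#     options['logfile']                       -> KeyError
# get() returns None, or the supplied default, instead:
driver = options.get('driver')                # 'sqlite3'
logfile = options.get('logfile', 'sql.log')   # 'sql.log' (the default)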

Project: spc    Author: whbrewer    | Project source | File source
def find_driver(self,adapter_args,uri=None):
        if getattr(self,'driver',None) is not None:
            return
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
Project: spc    Author: whbrewer    | Project source | File source
def _select_aux(self,sql,fields,attributes):
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
Project: spc    Author: whbrewer    | Project source | File source
def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
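The select() method above only caches when both the cache attribute and cacheable=True are passed; the Rows result is then stored under an MD5 hash of the URI and SQL. A hedged usage sketch in web2py/pyDAL style (the person table and the cache.ram object are assumptions about the surrounding application):

# assumes a web2py app where db and cache already exist
rows = db(db.person.id > 0).select(
    cache=(cache.ram, 3600),  # (cache_model, time_expire): keep the result for one hour
    cacheable=True)           # also allow the Rows object itself to be cached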
Project: Problematica-public    Author: TechMaz    | Project source | File source
def __call__(cls, *args, **kwargs):
        uploads_in_blob = kwargs.get('adapter_args', {}).get(
            'uploads_in_blob', cls.uploads_in_blob)
        cls.uploads_in_blob = uploads_in_blob

        entity_quoting = kwargs.get('entity_quoting', False)
        if 'entity_quoting' in kwargs:
            del kwargs['entity_quoting']

        obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
        if not entity_quoting:
            quot = obj.QUOTE_TEMPLATE = '%s'
            regex_ent = r'(\w+)'
        else:
            quot = obj.QUOTE_TEMPLATE
            regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
        obj.REGEX_TABLE_DOT_FIELD = re.compile(
            r'^' + quot % regex_ent + r'\.' + quot % regex_ent + r'$')

        return obj
Project: Problematica-public    Author: TechMaz    | Project source | File source
def find_driver(self, adapter_args, uri=None):
        self.adapter_args = adapter_args
        if getattr(self, 'driver', None) is not None:
            return
        drivers_available = [driver for driver in self.drivers
                             if driver in iterkeys(self.db._drivers_available)]
        if uri:
            items = uri.split('://', 1)[0].split(':')
            request_driver = items[1] if len(items) > 1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                #self.driver = globals().get(request_driver)
                self.driver = self.db._drivers_available[request_driver]
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            #self.driver = globals().get(self.driver_name)
            self.driver = self.db._drivers_available[self.driver_name]
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
Project: Problematica-public    Author: TechMaz    | Project source | File source
def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'ab')
            logfile.write(to_bytes(message))
            self.file_close(logfile)
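log() above resolves its target with self.adapter_args.get('logfile', 'sql.log'), so the migration log file is chosen when the connection is made. A hedged sketch of passing that argument (the connection string and file name are illustrative):

# assumes web2py's DAL; 'migrations.log' is just an example name
db = DAL('sqlite://storage.sqlite',
         folder='databases',
         adapter_args=dict(logfile='migrations.log'))  # read back via adapter_args.get('logfile', 'sql.log')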
Project: Problematica-public    Author: TechMaz    | Project source | File source
def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
Project: StuffShare    Author: StuffShare    | Project source | File source
def __call__(cls, *args, **kwargs):
        entity_quoting = kwargs.get('entity_quoting', False)
        if 'entity_quoting' in kwargs:
            del kwargs['entity_quoting']

        obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
        if not entity_quoting:
            quot = obj.QUOTE_TEMPLATE = '%s'
            regex_ent = r'(\w+)'
        else:
            quot = obj.QUOTE_TEMPLATE
            regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
        obj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
                                                    quot % regex_ent + \
                                                    r'\.' + \
                                                    quot % regex_ent + \
                                                    r'$')

        return obj


###############################################################################
# this is a generic adapter that does nothing; all others are derived from this
###############################################################################
Project: StuffShare    Author: StuffShare    | Project source | File source
def find_driver(self, adapter_args, uri=None):
        self.adapter_args = adapter_args
        if getattr(self, 'driver', None) is not None:
            return
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://', 1)[0].split(':')
            request_driver = items[1] if len(items) > 1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
Project: StuffShare    Author: StuffShare    | Project source | File source
def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile', 'sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
Project: StuffShare    Author: StuffShare    | Project source | File source
def _select_aux(self, sql, fields, attributes):
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key, _select_aux2, time_expire)
        if isinstance(rows, tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0, )
        rows = self.rowslice(rows, limitby[0], None)
        processor = args_get('processor', self.parse)
        cacheable = args_get('cacheable', False)
        return processor(rows, fields, self._colnames, cacheable=cacheable)
Project: StuffShare    Author: StuffShare    | Project source | File source
def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable', False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key) > 200: key = hashlib_md5(key).hexdigest()
            args = (sql, fields, attributes)
            return cache_model(
                key,
                lambda self=self, args=args: self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql, fields, attributes)
Project: StuffShare    Author: StuffShare    | Project source | File source
def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, tables=None):
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            pattern = pjoin(path, self._uri_hash+'_*.table')
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    name = filename[len(pattern)-7:-6]
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length', None),
                                 notnull=value.get('notnull', False),
                                 unique=value.get('unique', False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a, b: cmp(a[0], b[0]))
                    self.define_table(name, *[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
Project: hostapd-mana    Author: adde88    | Project source | File source
def getCachedPath(self, path):
        return self._pathCache.get(path)
Project: hostapd-mana    Author: adde88    | Project source | File source
def getTypeAndEncoding(filename, types, encodings, defaultType):
    p, ext = os.path.splitext(filename)
    ext = ext.lower()
    if encodings.has_key(ext):
        enc = encodings[ext]
        ext = os.path.splitext(p)[1].lower()
    else:
        enc = None
    type = types.get(ext, defaultType)
    return type, enc
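getTypeAndEncoding() first checks the outer extension against the encodings map (e.g. .gz), then resolves the content type with types.get(ext, defaultType). A hedged call sketch; in Twisted these dicts normally come from static.loadMimeTypes(), and the literal mappings below are only for demonstration:

types = {'.html': 'text/html', '.tar': 'application/x-tar'}
encodings = {'.gz': 'gzip'}
getTypeAndEncoding('page.html', types, encodings, 'text/plain')      # ('text/html', None)
getTypeAndEncoding('backup.tar.gz', types, encodings, 'text/plain')  # ('application/x-tar', 'gzip')
getTypeAndEncoding('unknown.xyz', types, encodings, 'text/plain')    # ('text/plain', None) via the default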
Project: hostapd-mana    Author: adde88    | Project source | File source
def getChild(self, path, request):
        """See twisted.web.Resource.getChild.
        """
        self.restat()

        if not self.isdir():
            return self.childNotFound

        if path:
            fpath = self.child(path)
        else:
            fpath = self.childSearchPreauth(*self.indexNames)
            if fpath is None:
                return self.directoryListing()

        if not fpath.exists():
            fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
            if fpath is None:
                return self.childNotFound

        if platformType == "win32":
            # don't want .RPY to be different than .rpy, since that would allow
            # source disclosure.
            processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
        else:
            processor = self.processors.get(fpath.splitext()[1])
        if processor:
            return resource.IResource(processor(fpath.path, self.registry))
        return self.createSimilarFile(fpath.path)

    # methods to allow subclasses to e.g. decrypt files on the fly:
Project: spc    Author: whbrewer    | Project source | File source
def CASE(self,query,t,f):
        def represent(x):
            types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
            if x is None: return 'NULL'
            elif isinstance(x,Expression): return str(x)
            else: return self.represent(x,types.get(type(x),'string'))
        return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
                              (self.expand(query),represent(t),represent(f)))

###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################
Project: spc    Author: whbrewer    | Project source | File source
def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
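SQLite has no row-level SELECT ... FOR UPDATE, so the adapter above falls back to BEGIN IMMEDIATE TRANSACTION, which locks the whole database file. A hedged usage sketch (the account table and its fields are assumptions):

# assumes a web2py DAL connected to SQLite; the lock is held until commit or rollback
row = db(db.account.id == 1).select(for_update=True).first()
if row:
    row.update_record(balance=row.balance - 10)
db.commit()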
Project: spc    Author: whbrewer    | Project source | File source
def NOT(self,first):
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError("Not suported")
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError("Not suported %s" % first.op.__name__)
        first.op = nop
        return self.expand(first)
Project: spc    Author: whbrewer    | Project source | File source
def select(self,query,fields,attributes):
        """
        This is the GAE version of select.  some notes to consider:
         - db['_lastsql'] is not set because there is no SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries.  note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field.  see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'filterfields' when set to True web2py will only
           parse the explicitly listed fields into the Rows object, even though
           all fields are returned in the query.  This can be used to reduce
           memory usage in cases where true projection queries are not
           usable.
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute.  Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query.  The user must save the cursor value between
           requests, and the filters must be identical.  It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
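A hedged sketch of the GAE-only attributes described in the docstring above (the article table and its fields are assumptions; GAE's projection rules, indexed non-blob fields only, still apply):

# projection query that only parses the listed fields into Rows
rows = db(db.article.author == 'alice').select(
    db.article.title, db.article.author,
    projection=True,     # ask the datastore for a projection query
    filterfields=True)   # only the listed fields are parsed into the Rows object

# continue a limitby query with a saved cursor
first = db(db.article.author == 'alice').select(limitby=(0, 20), reusecursor=True)
cursor = db['_lastcursor']
second = db(db.article.author == 'alice').select(limitby=(0, 20), reusecursor=cursor)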
Project: spc    Author: whbrewer    | Project source | File source
def _select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
Project: spc    Author: whbrewer    | Project source | File source
def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
Project: spc    Author: whbrewer    | Project source | File source
def is_flag(self, flag):
        if self.search_fields.get(flag, None) in self.flags:
            return True
        else:
            return False
Project: spc    Author: whbrewer    | Project source | File source
def __getitem__(self, k):
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, None)
            if v:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                key = m.group(2)
        return ogetattr(self, key)
Project: spc    Author: whbrewer    | Project source | File source
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
Project: spc    Author: whbrewer    | Project source | File source
def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if items:
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                         "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    name = filename[len(pattern)-7:-6]
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
Project: spc    Author: whbrewer    | Project source | File source
def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
Project: spc    Author: whbrewer    | Project source | File source
def __getattr__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)
Project: spc    Author: whbrewer    | Project source | File source
def get(self, key, default=None):
        return self.__getattr__(key, default)
Project: spc    Author: whbrewer    | Project source | File source
def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)
Project: spc    Author: whbrewer    | Project source | File source
def _create_references(self):
        db = self._db
        pr = db._pending_references
        self._referenced_by = []
        for field in self:
            fieldname = field.name
            field_type = field.type
            if isinstance(field_type,str) and field_type[:10] == 'reference ':
                ref = field_type[10:].strip()
                if not ref.split():
                    raise SyntaxError('Table: reference to nothing: %s' %ref)
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in db:
                    pr[rtablename] = pr.get(rtablename,[]) + [field]
                    continue
                rtable = db[rtablename]
                if len(refs)==2:
                    rfieldname = refs[1]
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError(
                            'keyed tables can only reference other keyed tables (for now)')
                    if rfieldname not in rtable.fields:
                        raise SyntaxError(
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename))
                rtable._referenced_by.append(field)
        for referee in pr.get(self._tablename,[]):
            self._referenced_by.append(referee)
Project: spc    Author: whbrewer    | Project source | File source
def _select(self, *fields, **attributes):
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)
Project: spc    Author: whbrewer    | Project source | File source
def select(self, *fields, **attributes):
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)
Project: spc    Author: whbrewer    | Project source | File source
def delete_uploaded_files(self, upload_fields=None):
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
Project: Problematica-public    Author: TechMaz    | Project source | File source
def _select_aux(self, sql, fields, attributes):
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            if isinstance(cache, dict):
                cache_model = cache['model']
                time_expire = cache['expiration']
                key = cache.get('key')
                if not key:
                    key = self.uri + '/' + sql + '/rows'
                    key = hashlib_md5(key).hexdigest()
            else:
                (cache_model, time_expire) = cache
                key = self.uri + '/' + sql + '/rows'
                key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor', self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
Project: Problematica-public    Author: TechMaz    | Project source | File source
def EXPAND_CASE(self, query, true_false):
        def represent(x):
            types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
            if x is None: return 'NULL'
            elif isinstance(x,Expression): return str(x)
            else: return self.represent(x,types.get(type(x),'string'))

        return 'CASE WHEN %s THEN %s ELSE %s END' % (
            self.expand(query),
            represent(true_false[0]),
            represent(true_false[1]))
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def eval(self, **kwds):
            """
            modules -- dictionary of module name to SBModule
            current_module -- current frame's module
            """
            modules = kwds['modules']
            m = modules.get(self.module, kwds['current_module'])
            s = next(s for s in m.symbols if s.name == self.symbol)
            a = s.addr
            return a.load_addr
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def repr(cls, s):
        TYPE_PREFIX = 'eTypeClass'
        start = cls.address(s)
        end = start + cls.size(s)

        types = {getattr(lldb,n) : n[len(TYPE_PREFIX):] for n in dir(lldb) if n.startswith(TYPE_PREFIX)}
        attributes = (n for n in ('external','synthetic') if getattr(s,n))
        if s.type in (lldb.eTypeClassFunction,):
            attributes = itertools.chain(attributes, ('instructions={:d}'.format(len(s.instructions)),))
        attributes=filter(None,attributes)
        return '{name:s}{opt_mangled:s} type={type:s} 0x{addr:x}{opt_size:s}'.format(name=s.name, type=types.get(s.type,str(s.type)), opt_mangled=(' ('+s.mangled+')') if s.mangled else '', addr=start, opt_size=':+0x{:x}'.format(end-start) if end > start else '') + ((' ' + ' '.join(attributes)) if attributes else '')
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def _dump(cls, data, kind=1):
        lookup = {1:'B', 2:'H', 4:'I', 8:'L'}
        itemtype = lookup.get(kind, kind)
        return array.array(itemtype, data)

    ## specific dumping formats
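Because of lookup.get(kind, kind), the kind argument above can be either a byte width (1, 2, 4, 8) or an explicit array typecode, which passes through unchanged. A small standalone sketch of the same idea:

import array

lookup = {1: 'B', 2: 'H', 4: 'I', 8: 'L'}

def dump(data, kind=1):
    # a known width maps to a typecode; anything else is assumed to already be one
    itemtype = lookup.get(kind, kind)
    return array.array(itemtype, data)

dump(b'\x01\x02\x03\x04', 1)    # array('B', [1, 2, 3, 4])
dump(b'\x01\x02\x03\x04', 4)    # array('I', [67305985]) on a little-endian machine
dump(b'\x01\x02\x03\x04', 'h')  # explicit typecode: array('h', [513, 1027])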
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def __set_enabled(cls, id, bool):
        bp = cls.get(id)
        res, _ = bp.IsEnabled(), bp.SetEnabled(bool)
        return res
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def get(cls, id):
        res = cls.cache[id]
        return cls.__internal__[res]
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def rm_command(cls, target, id):
        debugger = target.GetDebugger()
        bp = cls.get(id)
        bptype = 'breakpoint' if isinstance(bp, lldb.SBBreakpoint) else 'watchpoint'
        return debugger.HandleCommand("{:s} command delete {:d}".format(bptype, bp.GetID()))
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def remove(cls, target, id):
        key, bp = cls.cache[id], cls.get(id)
        if not isinstance(bp, (lldb.SBBreakpoint, lldb.SBWatchpoint)):
            raise TypeError("{:s}.{:s}.remove : Unable to remove unknown breakpoint type. : {!r}".format(__name__, cls.__name__, bp.__class__))
        cls.rm_command(target, id)
        cls.__rm_cache(id)
        cls.__rm_address(bp)
        cls.__expression__.pop(key)
        cls.__function__.pop(key)
        return target.BreakpointDelete(bp.GetID()) if isinstance(bp, lldb.SBBreakpoint) else target.DeleteWatchpoint(bp.GetID())
Project: LLDB_Tools    Author: blankwall    | Project source | File source
def repr(cls, id):
        key, bp = cls.cache[id], cls.get(id)
        addr, size = cls.__location(bp)
        expr = cls.__expression__.get(key, None)
        if isinstance(expr, list):
            expr = ' -- ' + ';'.join(expr)
        elif expr is None:
            expr = ''
        else:
            expr = ' -- ' + repr(expr)
        return '0x{:x}:+{:d} -- {{{:s}}}'.format(addr, size, 'enabled' if bp.IsEnabled() else 'disabled') + expr
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def getCachedPath(self, path):
        return self._pathCache.get(path)
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def getTypeAndEncoding(filename, types, encodings, defaultType):
    p, ext = os.path.splitext(filename)
    ext = ext.lower()
    if encodings.has_key(ext):
        enc = encodings[ext]
        ext = os.path.splitext(p)[1].lower()
    else:
        enc = None
    type = types.get(ext, defaultType)
    return type, enc
Project: sslstrip-hsts-openwrt    Author: adde88    | Project source | File source
def getChild(self, path, request):
        """See twisted.web.Resource.getChild.
        """
        self.restat()

        if not self.isdir():
            return self.childNotFound

        if path:
            fpath = self.child(path)
        else:
            fpath = self.childSearchPreauth(*self.indexNames)
            if fpath is None:
                return self.directoryListing()

        if not fpath.exists():
            fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
            if fpath is None:
                return self.childNotFound

        if platformType == "win32":
            # don't want .RPY to be different than .rpy, since that would allow
            # source disclosure.
            processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
        else:
            processor = self.processors.get(fpath.splitext()[1])
        if processor:
            return resource.IResource(processor(fpath.path, self.registry))
        return self.createSimilarFile(fpath.path)

    # methods to allow subclasses to e.g. decrypt files on the fly:
Project: MacHeap    Author: blankwall    | Project source | File source
def eval(self, **kwds):
            """
            modules -- dictionary of module name to SBModule
            current_module -- current frame's module
            """
            modules = kwds['modules']
            m = modules.get(self.module, kwds['current_module'])
            s = next(s for s in m.symbols if s.name == self.symbol)
            a = s.addr
            return a.load_addr
Project: MacHeap    Author: blankwall    | Project source | File source
def repr(cls, s):
        TYPE_PREFIX = 'eTypeClass'
        start = cls.address(s)
        end = start + cls.size(s)

        types = {getattr(lldb,n) : n[len(TYPE_PREFIX):] for n in dir(lldb) if n.startswith(TYPE_PREFIX)}
        attributes = (n for n in ('external','synthetic') if getattr(s,n))
        if s.type in (lldb.eTypeClassFunction,):
            attributes = itertools.chain(attributes, ('instructions={:d}'.format(len(s.instructions)),))
        attributes=filter(None,attributes)
        return '{name:s}{opt_mangled:s} type={type:s} 0x{addr:x}{opt_size:s}'.format(name=s.name, type=types.get(s.type,str(s.type)), opt_mangled=(' ('+s.mangled+')') if s.mangled else '', addr=start, opt_size=':+0x{:x}'.format(end-start) if end > start else '') + ((' ' + ' '.join(attributes)) if attributes else '')
Project: MacHeap    Author: blankwall    | Project source | File source
def _dump(cls, data, kind=1):
        lookup = {1:'B', 2:'H', 4:'I', 8:'L'}
        itemtype = lookup.get(kind, kind)
        return array.array(itemtype, data)

    ## specific dumping formats