Python dateutil module: parser() code examples

We extracted the following 36 code examples from open-source Python projects to illustrate how to use dateutil.parser().
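
Before the project snippets, here is a minimal, self-contained sketch of the basic dateutil.parser usage that most of them rely on (the sample date strings are illustrative only):

# Minimal sketch: dateutil.parser.parse turns many date/time string formats into datetime objects
import dateutil.parser

dt1 = dateutil.parser.parse("2012-06-13T01:39:00Z")        # ISO 8601 with a timezone designator
dt2 = dateutil.parser.parse("Jun 13 2012 1:39AM")          # free-form English date
dt3 = dateutil.parser.parse("13/06/2012", dayfirst=True)   # ambiguous order, interpret day first

print(dt1.tzinfo)    # tzutc() - timezone-aware result
print(dt2)           # 2012-06-13 01:39:00 - naive result
print(dt3.date())    # 2012-06-13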

Project: FundSpider | Author: s6530085
def parse_home(self, content):
        # Note: the page is encoded as gb2312, so parse it with the gbk codec (a superset of gb2312)
        html = etree.HTML(content, parser=etree.HTMLParser(encoding='gbk'))
        items = html.xpath('//div[@class="quotebody"]//li/a')
        stocks = []
        for item in items:
            # item text has the form 'name(600000)': a stock name followed by its 6-digit code in parentheses
            i = item.text.strip()
            if len(i.split('(')) == 2:
                name = i.split('(')[0]
                code = i.split('(')[1][0:6]
                if self._isstock(code):
                    stocks.append(code)
        return stocks

Project: ExtensionCrawler | Author: logicalhacking
def last_crx(archivedir, extid, date=None):
    last_crx = ""
    last_crx_etag = ""
    tar = os.path.join(archivedir, get_local_archive_dir(extid),
                       extid + ".tar")
    if os.path.exists(tar):
        with tarfile.open(tar, 'r') as t:
            old_crxs = sorted([
                x.name for x in t.getmembers()
                if x.name.endswith(".crx") and x.size > 0 and (
                    date is None or (dateutil.parser.parse(
                        os.path.split(os.path.split(x.name)[0])[1]) <= date))
            ])
            if old_crxs != []:
                last_crx = old_crxs[-1]
                headers_content = t.extractfile(
                    last_crx + ".headers").read().decode().replace(
                        '"', '\\"').replace("'", '"')
                headers_json = json.loads(headers_content)
                last_crx_etag = headers_json["ETag"]

    return last_crx, last_crx_etag
Project: ExtensionCrawler | Author: logicalhacking
def first_crx(archivedir, extid, date=None):
    first_crx = ""
    tar = os.path.join(archivedir, get_local_archive_dir(extid),
                       extid + ".tar")
    if os.path.exists(tar):
        t = tarfile.open(tar, 'r')
        old_crxs = sorted([
            x.name for x in t.getmembers()
            if x.name.endswith(".crx") and x.size > 0 and (
                date is None or (date <= dateutil.parser.parse(
                    os.path.split(os.path.split(x.name)[0])[1])))
        ])
        t.close()
        if old_crxs != []:
            first_crx = old_crxs[0]

    return first_crx
Project: cuckoodroid-2.0 | Author: idanr1986
def parse(self, path):
        parser = StapParser(open(path))

        for event in parser:
            pid = event["pid"]
            if pid not in self.pids_seen:
                self.pids_seen.add(pid)
                ppid = self.forkmap.get(pid, -1)

                process = {
                    "pid": pid,
                    "ppid": ppid,
                    "process_name": event["process_name"],
                    "first_seen": event["time"],
                }

                # create a process event as we don't have those with linux+systemtap
                pevent = dict(process)
                pevent["type"] = "process"
                yield pevent

                process["calls"] = FilteredProcessLog(parser, pid=pid)
                self.processes.append(process)

            yield event
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def _format_is_iso(f):
    """
    Does format match the iso8601 set that can be handled by the C parser?
    Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
    but must be consistent.  Leading 0s in dates and times are optional.
    """
    iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format
    excluded_formats = ['%Y%m%d', '%Y%m', '%Y']

    for date_sep in [' ', '/', '\\', '-', '.', '']:
        for time_sep in [' ', 'T']:
            if (iso_template(date_sep=date_sep,
                             time_sep=time_sep
                             ).startswith(f) and f not in excluded_formats):
                return True
    return False
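
A few illustrative calls to the helper above; the expected results follow directly from its template logic (this is a sketch, not part of the original test suite):

print(_format_is_iso('%Y-%m-%d'))           # True  - prefix of '%Y-%m-%d %H:%M:%S.%f'
print(_format_is_iso('%Y/%m/%d %H:%M:%S'))  # True  - '/' is an accepted date separator
print(_format_is_iso('%Y%m%d'))             # False - explicitly excluded
print(_format_is_iso('%d-%m-%Y'))           # False - day-first is not an ISO 8601 layout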
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_parsers_timestring(self):
        tm._skip_if_no_dateutil()
        from dateutil.parser import parse

        # must be the same as dateutil result
        cases = {'10:15': (parse('10:15'), datetime.datetime(1, 1, 1, 10, 15)),
                 '9:05': (parse('9:05'), datetime.datetime(1, 1, 1, 9, 5))}

        for date_str, (exp_now, exp_def) in compat.iteritems(cases):
            result1, _, _ = tools.parse_time_string(date_str)
            result2 = to_datetime(date_str)
            result3 = to_datetime([date_str])
            result4 = Timestamp(date_str)
            result5 = DatetimeIndex([date_str])[0]
            # parse_time_string() resolves a bare time against the default
            # date; the others resolve it against the current date, and this
            # can't be changed because time series plotting relies on it
            self.assertEqual(result1, exp_def)
            self.assertEqual(result2, exp_now)
            self.assertEqual(result3, exp_now)
            self.assertEqual(result4, exp_now)
            self.assertEqual(result5, exp_now)
Project: dcc-metadata-indexer | Author: BD2KGenomics
def input_Options():
    """
    Creates the parse options
    """
    parser = argparse.ArgumentParser(description='Directory that contains Json files.')
    parser.add_argument('-d', '--test-directory', help='Directory that contains the json metadata files')
    parser.add_argument('-u', '--skip-uuid-directory', help='Directory that contains files with file uuids (bundle uuids, one per line, file ending with .redacted) that represent databundles that should be skipped, useful for redacting content (but not deleting it)')
    parser.add_argument('-m', '--metadata-schema', help='File that contains the metadata schema')
    parser.add_argument('-s', '--skip-program', help='Lets user skip certain json files that contain a specific program test')
    parser.add_argument('-o', '--only-program', help='Lets user include certain json files that contain a specific program  test')
    parser.add_argument('-r', '--skip-project', help='Lets user skip certain json files that contain a specific project test')
    parser.add_argument('-t', '--only-project', help='Lets user include certain json files that contain a specific project test')
    parser.add_argument('-a', '--storage-access-token', default="NA", help='Storage access token to download the metadata.json files') 
    parser.add_argument('-n', '--server-host', default="redwood.io", help='hostname for the storage service')
    parser.add_argument('-p', '--max-pages', default=None, type=int, help='Specify maximum number of pages to download')
    parser.add_argument('-preserve-version',action='store_true', default=False, help='Keep all copies of analysis events')

    args = parser.parse_args()
    return args
Project: dcc-metadata-indexer | Author: BD2KGenomics
def input_Options():
    """
    Creates the parse options
    """
    parser = argparse.ArgumentParser(description='Directory that contains Json files.')
    parser.add_argument('-d', '--test-directory', help='Directory that contains the json metadata files')
    parser.add_argument('-u', '--skip-uuid-directory', help='Directory that contains files with file uuids (bundle uuids, one per line, file ending with .redacted) that represent databundles that should be skipped, useful for redacting content (but not deleting it)')
    parser.add_argument('-m', '--metadata-schema', help='File that contains the metadata schema')
    parser.add_argument('-s', '--skip-program', help='Lets user skip certain json files that contain a specific program test')
    parser.add_argument('-o', '--only-program', help='Lets user include certain json files that contain a specific program  test')
    parser.add_argument('-r', '--skip-project', help='Lets user skip certain json files that contain a specific project test')
    parser.add_argument('-t', '--only-project', help='Lets user include certain json files that contain a specific project test')
    parser.add_argument('-a', '--storage-access-token', default="NA", help='Storage access token to download the metadata.json files')
    parser.add_argument('-c', '--client-path', default="ucsc-storage-client/", help='Path to access the ucsc-storage-client tool')
    parser.add_argument('-n', '--server-host', default="storage.ucsc-cgl.org", help='hostname for the storage service')
    parser.add_argument('-p', '--max-pages', default=None, type=int, help='Specify maximum number of pages to download')
    parser.add_argument('-preserve-version',action='store_true', default=False, help='Keep all copies of analysis events')

    args = parser.parse_args()
    return args
Project: cyphon | Author: dunbarcyber
def parse_date(date):
    """
    Takes a string and attempts to parse it as a date. If successful,
    returns a datetime object based on the string. Otherwise,
    returns None.
    """
    if isinstance(date, datetime.datetime):  # just in case...
        return date
    elif isinstance(date, str):
        try:
            parsed_date = dateutil.parser.parse(date)
            return ensure_tz_aware(parsed_date)
        except ValueError:
            try:
                return datetime.datetime.fromtimestamp(float(date), tz=UTC_TZ)
            except ValueError:
                return None
    else:
        return None
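
The helper above tries dateutil.parser.parse first and only then interprets the string as a Unix timestamp. Below is a self-contained sketch of the same two-step fallback with invented inputs (ensure_tz_aware and UTC_TZ from the snippet are replaced by plain dateutil.tz usage here):

# Standalone sketch of the string-then-timestamp fallback (inputs are made up)
import datetime
import dateutil.parser
import dateutil.tz

UTC = dateutil.tz.tzutc()

def to_datetime(value):
    """Return a datetime for a date string or a Unix-timestamp string, else None."""
    try:
        return dateutil.parser.parse(value)            # e.g. "2017-03-01T12:00:00Z"
    except ValueError:
        try:
            return datetime.datetime.fromtimestamp(float(value), tz=UTC)  # e.g. "1488369600"
        except ValueError:
            return None

print(to_datetime("2017-03-01T12:00:00Z"))   # 2017-03-01 12:00:00+00:00
print(to_datetime("1488369600"))             # the same instant, recovered from the epoch value
print(to_datetime("not a date"))             # None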
Project: hoaxy-backend | Author: IUNetSci
def get_value(self):
        """Get the original values."""
        if self.value_type == "str":
            return self.value
        elif self.value_type == 'int':
            return int(self.value)
        elif self.value_type == 'bool':
            if self.value.lower() in ('true', 't', 'y', 'yes'):
                return True
            elif self.value.lower() in ('false', 'f', 'n', 'no'):
                return False
            else:
                return bool(self.value)
        elif self.value_type == 'float':
            return float(self.value)
        elif self.value_type == 'datetime':
            return dateutil.parser.parse(self.value)
        else:
            logger.error('Unsupported meta value_type %s', self.value_type)
            return None
Project: v2ex-tornado-2 | Author: coderyy
def date(value, fmt=None):
    date = dateutil.parser.parse(str(value))
    native = date.replace(tzinfo=None)
    format='%b %d, %Y'
    return native.strftime(format)
Project: django-souvenirs | Author: appsembler
def __call__(self, parser, namespace, values, option_string=None):
        try:
            dt = dateutil.parser.parse(values)
        except ValueError:
            raise CommandError("can't parse date: {}".format(values))
        if dt.tzinfo is None:
            dt = timezone.make_aware(dt)
        setattr(namespace, self.dest, dt)
Project: Dshield | Author: ywjt
def testParserAll(self):
        # All interface
        from dateutil.parser import parse
        from dateutil.parser import parserinfo

        # Other public classes
        from dateutil.parser import parser

        for var in (parse, parserinfo, parser):
            self.assertIsNot(var, None)
Project: data-bundle-examples | Author: HumanCellAtlas
def __init__(self):
        parser = argparse.ArgumentParser(description='Downloads data files for the various bundles.')
        parser.add_argument('--input-dir', default='.', required=True)

        # get args
        args = parser.parse_args()
        self.input_dir = args.input_dir

        # run
        self.run()
Project: FundSpider | Author: s6530085
def parse_stock(self, content):
        stock = StockInfo()
        html = etree.HTML(content, parser=etree.HTMLParser(encoding='utf-8'))
        ths = html.xpath('//th[@class="tips-fieldnameL"]')
        tds = html.xpath('//td[contains(@class, "tips-dataL")]')
        for (index, th) in enumerate(ths):
            key = th.text.strip()
            value = tds[index].text.strip()
            if value == '--':
                value = ''
            if key == StockInfo.FULL_NAME_CHINESE_KEY:
                stock.fullname = value
            elif key == StockInfo.USED_NAME_CHINESE_KEY:
                # previously used names, separated by '->'
                stock.used_names = value.split('->')
            elif key == StockInfo.CODE_CHINESE_KEY:
                stock.code = value
            elif key == StockInfo.SHORT_NAME_CHINESE_KEY:
                stock.shortname = value
            elif key == StockInfo.MARKET_CHINESE_KEY:
                stock.market = value
            elif key == StockInfo.INDUSTRY_CHINESE_KEY:
                stock.industry = value
            elif key == StockInfo.AREA_CHINESE_KEY:
                stock.area = value
            # listing (release) date; '--' on the page has already been normalised to '' above
            elif key == StockInfo.RELEASE_DATE_CHINESE_KEY:
                stock.releasedate = value
        return stock
Project: ExtensionCrawler | Author: logicalhacking
def last_modified_http_date(path):
    if path == "":
        return ""
    return httpdate(dateutil.parser.parse(last_modified_utc_date(path)))
Project: python_iotile_cloud | Author: iotile
def _date_format(self, timestamp):
        try:
            dt = dateutil.parser.parse(timestamp)
            return dt
        except Exception as e:
            logger.error('Unable to parse timestamp (with parser): ' + str(e))
            sys.exit(1)
Project: twtxt | Author: buckket
def parse_iso8601(string):
    """Parse string using dateutil.parser."""
    return make_aware(dateutil.parser.parse(string))
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
    """
    Try hard to parse datetime string, leveraging dateutil plus some extra
    goodies like quarter recognition.

    Parameters
    ----------
    arg : compat.string_types
    freq : str or DateOffset, default None
        Helps with interpreting time string if supplied
    dayfirst : bool, default None
        If None uses default from print_config
    yearfirst : bool, default None
        If None uses default from print_config

    Returns
    -------
    datetime, datetime/dateutil.parser._result, str
    """
    from pandas.core.config import get_option
    if not isinstance(arg, compat.string_types):
        return arg

    from pandas.tseries.offsets import DateOffset
    if isinstance(freq, DateOffset):
        freq = freq.rule_code

    if dayfirst is None:
        dayfirst = get_option("display.date_dayfirst")
    if yearfirst is None:
        yearfirst = get_option("display.date_yearfirst")

    return tslib.parse_datetime_string_with_reso(arg, freq=freq,
                                                 dayfirst=dayfirst,
                                                 yearfirst=yearfirst)
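
The dayfirst/yearfirst flags that parse_time_string forwards come straight from dateutil; a quick sketch of how they change the reading of one ambiguous string (the expected values match the '10-11-12' test case in the next snippet):

from dateutil.parser import parse

s = '10-11-12'
print(parse(s))                                 # 2012-10-11 00:00:00  (month first by default)
print(parse(s, dayfirst=True))                  # 2012-11-10 00:00:00
print(parse(s, yearfirst=True))                 # 2010-11-12 00:00:00
print(parse(s, dayfirst=True, yearfirst=True))  # 2010-11-12 00:00:00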
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_parsers_dayfirst_yearfirst(self):
        # str : dayfirst, yearfirst, expected
        cases = {'10-11-12': [(False, False, datetime.datetime(2012, 10, 11)),
                              (True, False, datetime.datetime(2012, 11, 10)),
                              (False, True, datetime.datetime(2010, 11, 12)),
                              (True, True, datetime.datetime(2010, 11, 12))],
                 '20/12/21': [(False, False, datetime.datetime(2021, 12, 20)),
                              (True, False, datetime.datetime(2021, 12, 20)),
                              (False, True, datetime.datetime(2020, 12, 21)),
                              (True, True, datetime.datetime(2020, 12, 21))]}

        tm._skip_if_no_dateutil()
        from dateutil.parser import parse
        for date_str, values in compat.iteritems(cases):
            for dayfirst, yearfirst, expected in values:
                result1, _, _ = tools.parse_time_string(date_str,
                                                        dayfirst=dayfirst,
                                                        yearfirst=yearfirst)

                result2 = to_datetime(date_str, dayfirst=dayfirst,
                                      yearfirst=yearfirst)

                result3 = DatetimeIndex([date_str], dayfirst=dayfirst,
                                        yearfirst=yearfirst)[0]

                # Timestamp doesn't support dayfirst and yearfirst

                self.assertEqual(result1, expected)
                self.assertEqual(result2, expected)
                self.assertEqual(result3, expected)

                # compare with dateutil result
                dateutil_result = parse(date_str, dayfirst=dayfirst,
                                        yearfirst=yearfirst)
                self.assertEqual(dateutil_result, expected)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_parsers_iso8601(self):
        # GH 12060
        # test only the iso parser - flexibility to different
        # separators and leading 0s
        # Timestamp construction falls back to dateutil
        cases = {'2011-01-02': datetime.datetime(2011, 1, 2),
                 '2011-1-2': datetime.datetime(2011, 1, 2),
                 '2011-01': datetime.datetime(2011, 1, 1),
                 '2011-1': datetime.datetime(2011, 1, 1),
                 '2011 01 02': datetime.datetime(2011, 1, 2),
                 '2011.01.02': datetime.datetime(2011, 1, 2),
                 '2011/01/02': datetime.datetime(2011, 1, 2),
                 '2011\\01\\02': datetime.datetime(2011, 1, 2),
                 '2013-01-01 05:30:00': datetime.datetime(2013, 1, 1, 5, 30),
                 '2013-1-1 5:30:00': datetime.datetime(2013, 1, 1, 5, 30)}
        for date_str, exp in compat.iteritems(cases):
            actual = tslib._test_parse_iso8601(date_str)
            self.assertEqual(actual, exp)

        # separators must all match - YYYYMM not valid
        invalid_cases = ['2011-01/02', '2011^11^11',
                         '201401', '201111', '200101',
                         # mixed separated and unseparated
                         '2005-0101', '200501-01',
                         '20010101 12:3456', '20010101 1234:56',
                         # HHMMSS must have two digits in each component
                         # if unseparated
                         '20010101 1', '20010101 123', '20010101 12345',
                         '20010101 12345Z',
                         # wrong separator for HHMMSS
                         '2001-01-01 12-34-56']
        for date_str in invalid_cases:
            with tm.assertRaises(ValueError):
                tslib._test_parse_iso8601(date_str)
                # If no ValueError raised, let me know which case failed.
                raise Exception(date_str)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_to_datetime_tzlocal(self):
        from dateutil.parser import parse
        from dateutil.tz import tzlocal
        dt = parse('2012-06-13T01:39:00Z')
        dt = dt.replace(tzinfo=tzlocal())

        arr = np.array([dt], dtype=object)

        result = to_datetime(arr, utc=True)
        self.assertIs(result.tz, pytz.utc)

        rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
        arr = rng.to_pydatetime()
        result = to_datetime(arr, utc=True)
        self.assertIs(result.tz, pytz.utc)
Project: smarthome | Author: smarthomeNG
def json_obj_hook(json_dict):
    """helper method for json deserialization"""
    import dateutil.parser
    for (key, value) in json_dict.items():
        try:
            json_dict[key] = dateutil.parser.parse(value)
        except Exception as e :
            pass
    return json_dict
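
If the hook above were wired into json.loads, usage might look like this sketch (the payload is invented for illustration):

import json

payload = '{"name": "sensor1", "last_seen": "2017-05-04T10:15:00+02:00", "value": 42}'
data = json.loads(payload, object_hook=json_obj_hook)

print(type(data["last_seen"]))      # <class 'datetime.datetime'> - date-like strings are converted
print(data["name"], data["value"])  # non-date values pass through unchanged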
Project: filtered-alert-hub | Author: filtered-alert-hub
def makeItem(caproot,metadata):

    newitem = ET.Element("item")
    ET.register_namespace('dc', "http://purl.org/dc/elements/1.1/" )


    title = ET.SubElement(newitem, "title")
    link = ET.SubElement(newitem, "link")
    description = ET.SubElement(newitem, "description")
    category = ET.SubElement(newitem, "category")
    itempubDate = ET.SubElement(newitem, "pubDate")
    guid = ET.SubElement(newitem, "guid")
    creator = ET.SubElement(newitem, "{http://purl.org/dc/elements/1.1/}creator")
    itemdate = ET.SubElement(newitem, "{http://purl.org/dc/elements/1.1/}date")


    namespaces = {'cap': 'urn:oasis:names:tc:emergency:cap:1.1'}

    alerttime = dateutil.parser.parse(caproot.xpath("string(cap:sent/text())", namespaces=namespaces ))
    #print("got time {} from cap".format(alerttime))
    alerttime =  (alerttime - alerttime.utcoffset()).replace(tzinfo=None) if alerttime.utcoffset() else alerttime #convert to UTC
    #print("converted to  {} ".format(alerttime))


    title.text = caproot.xpath("string(cap:info/cap:headline/text())", namespaces=namespaces )
    link.text = metadata["link"]
    description.text = caproot.xpath("string(cap:info/cap:description/text())", namespaces=namespaces )
    itempubDate.text = alerttime.strftime("%a, %d %b %Y %H:%M:%S %z")
    itemdate.text = alerttime.isoformat()
    guid.text = caproot.xpath("string(cap:identifier/text())", namespaces=namespaces )

    sender = caproot.xpath("string(cap:sender/text())", namespaces=namespaces )
    senderName = caproot.xpath("string(cap:senderName/text())", namespaces=namespaces )

    creator.text =  "{} ({})".format(sender,senderName) if senderName else sender 
    category.text = ",".join(caproot.xpath("cap:info/cap:category/text()",namespaces=namespaces))

    return newitem
Project: catchpy | Author: nmaekawa
def _get_original_created(cls, catcha):
        '''convert `created` from catcha or return current date.'''
        try:
            original_date = dateutil.parser.parse(catcha['created'])
        except (TypeError, OverflowError, KeyError) as e:
            msg = ('error converting iso8601 `created` date in anno({}) '
                   'copy, setting a fresh date: {}').format(
                       catcha['id'], str(e))
            logger.error(msg, exc_info=True)
            original_date = datetime.now(dateutil.tz.tzutc()).replace(
                microsecond=0)
        return original_date
Project: cyphon | Author: dunbarcyber
def date_from_str(date_string, date_format=None):
    """

    """
    fail_msg = ('Could not parse the date string. '
                'Please check the date format')

    success_msg = ('Could not parse the date string using the given format, '
                   'so a different parser was used. Please check the date '
                   'format')

    if date_format:
        try:
            date = format_date(date_string, date_format)
        except ValueError as error:
            date = parse_date(date_string)
            if date:
                LOGGER.warning('%s: %s', success_msg, error)
            else:
                LOGGER.error('%s: %s', fail_msg, error)
    else:
        date = parse_date(date_string)
        if not date:
            LOGGER.error(fail_msg)

    return date
Project: simple-monitor-alert | Author: Nekmo
def human_since(since, include_tz=False):
    tz = dateutil.tz.tzlocal() if include_tz else None
    return naturaltime(datetime.datetime.now(tz=tz) - dateutil.parser.parse(since))
Project: simple-monitor-alert | Author: Nekmo
def send_alerts(self, observable, fail=True):
        communication = ObservableCommunication(observable, fail)
        for alert in self:
            seconds = observable.get_line_value('seconds', 0)
            since = self.sma.results.get_observable_result(observable).get('since')
            since = (dateutil.parser.parse(since) if since else datetime.datetime.now()).replace(tzinfo=tzlocal())
            dt = datetime.datetime.now().replace(tzinfo=tzlocal())
            if alert.section in self.sma.results.get_observable_result(observable)['alerted']:
                continue
            elif seconds and dt - since <= datetime.timedelta(seconds=seconds):
                continue
            success = alert.send(communication['subject'], communication['message'], **communication.alert_kwargs())
            if success:
                self.sma.results.add_alert_to_observable_result(observable, alert.section)
Project: Mastodon.py | Author: halcy
def __json_date_parse(json_object):
        """
        Parse dates in certain known json fields, if possible.
        """
        known_date_fields = ["created_at"]
        for k, v in json_object.items():
            if k in known_date_fields:
                try:
                    if isinstance(v, int):
                        json_object[k] = datetime.datetime.fromtimestamp(v, pytz.utc)
                    else:
                        json_object[k] = dateutil.parser.parse(v)
                except:
                    raise MastodonAPIError('Encountered invalid date.')
        return json_object
Project: gantthelper | Author: le1ca
def __init__(self, name, start_date, end_date, assigned_to=None):
        self.name        = name
        self.start_date  = dateutil.parser.parse(start_date)
        self.end_date    = dateutil.parser.parse(end_date)
        self.assigned_to = assigned_to
Project: mando.me | Author: z0noxz
def mc_ls(match = None):
        #import dateutil.parser

        dir = []
        Print.text()

        if _gs["system"] == Utility.os.LINUX: 
            wd = PHPInteractor.command("cd " + _gs["working_directory"] + " && ls").strip()
        elif _gs["system"] == Utility.os.WINDOWS:
            for o in PHPInteractor.command("echo off && cd " + _gs["working_directory"] + " && FOR /D %D IN (*) DO (echo %~aD;%~tD;;%~nD)").strip().split("\n") + PHPInteractor.command("echo off && cd " + _gs["working_directory"] + " && FOR %F IN (*) DO (echo %~aF;%~tF;%~zF;%~nxF)").strip().split("\n"):
                x = o.split(";")
                dt = parse(x[1])

                dir.append({
                    0 : x[0],
                    1 : Utility.filesize(int(x[2] if x[2] != "" else 0)).rjust(16),
                    2 : "dir" if x[2] == "" else x[3][x[3].rfind(".") + 1:] if "." in x[3] else "n/a",
                    3 : "{0}-{1:02}-{2:02} {3:02}:{4:02}".format(dt.year, dt.month, dt.day, dt.hour, dt.minute),
                    4 : x[3]
                })

        Print.table(
            caption = _gs["working_directory"],
            headers = ["Attributes", "Size", "Type", "Last modified", "Name"], 
            rows = dir
        )           
        Print.text()
Project: filtered-alert-hub | Author: filtered-alert-hub
def writeItemsToFeed(feedroot,items):

    #prune old items 
    timenow = datetime.utcnow()  # not timezone-aware; itemdate below is also made naive before comparing
    i=1
    for olditem in feedroot.xpath("/rss/channel/item"):
        itemnr = len(items)+i #need to consider the newly arrived items
        remove=False
        try:
            itemdate = dateutil.parser.parse(olditem.find('pubDate').text).replace(tzinfo=None)
            if itemnr > maxfeeditems and (timenow - itemdate) > timedelta(seconds=maxfeedsecs) :
                remove=True
        except TypeError as te:
            print("problem pruning item {} (nr:{} date:{} date:{} ) ({}) ".format(olditem,itemnr,itemdate,timenow,te))
        except AttributeError as ae:
            print("problem getting date of item {} ({}) .. pruning anyway".format( ET.tostring(olditem),ae) )
            remove = itemnr > maxfeeditems

        if remove:
            olditem.getparent().remove(olditem)
            if debug:
                print("removed item {} from feed".format(olditem))

        i=i+1

    existingitems = feedroot.xpath("/rss/channel/item")
    if existingitems:
        firstitem = existingitems[0]
        parent = firstitem.getparent()
        idx = parent.index(firstitem)

    else:
        parent = feedroot.xpath("/rss/channel")
        parent = parent[0]
        idx = len(parent)

    for i,newitem in enumerate(items):
        parent.insert(idx, newitem)

    #update pubdates

    timenow = datetime.utcnow()  # not timezone-aware; formatted into pubDate / dc:date strings below
    pubdate =  "{} GMT".format(timenow.strftime("%a, %d %b %Y %H:%M:%S")) 
    dcdate = "{}Z".format(timenow.isoformat())

    #print("setting date feed update to {} and {}".format(pubdate,dcdate))

    pubdateelem = feedroot.xpath("/rss/channel/pubDate")
    if pubdateelem:
        pubdateelem = pubdateelem[0]
        pubdateelem.text = pubdate

    namespaces = {'dc': 'http://purl.org/dc/elements/1.1/'}
    dcdateelem = feedroot.xpath("/rss/channel/dc:date",namespaces=namespaces)   
    if dcdateelem:
        dcdateelem = dcdateelem[0]
        dcdateelem.text = dcdate
Project: filtered-alert-hub | Author: filtered-alert-hub
def updateRss(feedupdate):

    feedinfo = feedupdate["feedinfo"]
    feedid =  feedinfo["id"]
    feedurl = feedinfo["url"]
    alerts = feedupdate["alerts"]

    print("updating {} with {} items".format(feedid,str(len(alerts))))
    items = []
    for alert in alerts:
        capxml = alert["capXml"]
        alertId = alert["hubRecordKey"]

        if debug:
            print("storing alert {} for feed {}".format(alertId,feedid))

        temp = StringIO.StringIO(capxml.encode('utf-8'))
        capdom = ET.parse(temp)
        caproot = capdom.getroot()

        metadata = {  "link" : "https://alert-hub.s3.amazonaws.com/{}".format( alert["hubRecordKey"] ) }

        newitem=makeItem(caproot,metadata)
        items.append(newitem)

        #print(ET.tostring(newitem, pretty_print=True))


    # get RSS from S3 and parse it
    s3 = boto3.client('s3')

    s3keyold =  "{}/rss.xml".format(feedid)
    s3bucketold = "alert-feeds"
    if debug:
        print("downloading old feed {} from S3 bucket {}".format(s3keyold,s3bucketold))

    response = s3.get_object(Bucket=s3bucketold,Key=s3keyold)
    parser = ET.XMLParser(remove_blank_text=True)
    rssdom = ET.parse(response["Body"],parser)
    rssroot = rssdom.getroot()

    writeItemsToFeed(rssroot,items)

    with BytesIO() as outdata:
        rssdom.write(outdata, xml_declaration=True ,pretty_print=True, encoding="UTF-8")
        outdata.seek(0)

        s3key = "{}/rss.xml".format(feedid)
        s3bucket = 'alert-feeds'
        if debug:
            print("writing back updated RSS feed to {} {}".format(s3bucket,s3key))
        s3.put_object(Bucket=s3bucket,Key=s3key,ACL='public-read',ContentType="application/rss+xml",Body=outdata.read())
Project: forseti-security | Author: GoogleCloudPlatform
def _transform_authorizednetworks(self, cloudsql_instances_map):
        """Yield an iterator of loadable authorized networks of cloudsql
        instances.

        Args:
            cloudsql_instances_map (iterable): instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Yields:
            iterable: authorized network dictionary.
        """

        for instances_map in cloudsql_instances_map:
            instances = instances_map['instances']
            for item in instances:
                authorizednetworks = item.get('settings', {})\
                    .get('ipConfiguration', {}).get('authorizedNetworks', [{}])
                for network in authorizednetworks:
                    if network.get('expirationTime') is not None:
                        try:
                            parsed_time = dateutil_parser\
                                .parse(network.get('expirationTime'))
                            formatted_expirationtime = (
                                parsed_time\
                                    .strftime(self.MYSQL_DATETIME_FORMAT))
                        except (TypeError, ValueError, AttributeError) as e:
                            LOGGER.error(
                                'Unable to parse expirationTime '
                                'from authorizednetworks: %s\n%s',
                                network.get('expirationTime', ''), e)
                            formatted_expirationtime = '1972-01-01 00:00:00'
                    else:
                        formatted_expirationtime = '1972-01-01 00:00:00'

                    yield {
                        'project_number': instances_map['project_number'],
                        'instance_name': item.get('name'),
                        'kind': network.get('kind'),
                        'name': network.get('name'),
                        'value': network.get('value'),
                        'expiration_time': formatted_expirationtime
                    }
Project: forseti-security | Author: GoogleCloudPlatform
def _transform_ipaddresses(self, cloudsql_instances_map):
        """Yield an iterator of loadable ipAddresses of cloudsql instances.

        Args:
            cloudsql_instances_map (iterable): Instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Yields:
            iterable: ipAddresses dictionary.
        """

        for instances_map in cloudsql_instances_map:
            instances = instances_map['instances']
            for item in instances:
                ipaddresses = item.get('ipAddresses', [{}])
                for ipaddress in ipaddresses:
                    if ipaddress.get('timeToRetire') is not None:
                        try:
                            parsed_time = dateutil_parser\
                                .parse(ipaddress.get('timeToRetire'))
                            formatted_timetoretire = (
                                parsed_time\
                                    .strftime(self.MYSQL_DATETIME_FORMAT))
                        except (TypeError, ValueError, AttributeError) as e:
                            LOGGER.error(
                                'Unable to parse timeToRetire'
                                ' from ipaddresses: %s\n%s',
                                ipaddress.get('timeToRetire', ''), e)
                            formatted_timetoretire = '1972-01-01 00:00:00'
                    else:
                        formatted_timetoretire = '1972-01-01 00:00:00'

                    yield {
                        'project_number': instances_map['project_number'],
                        'instance_name': item.get('name'),
                        'ip_address': ipaddress.get('ipAddress'),
                        'type': ipaddress.get('type'),
                        'time_to_retire': formatted_timetoretire
                    }

    # pylint: disable=arguments-differ
Project: flask-zhenai-mongo-echarts | Author: Fretice
def to_mongo(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)
        if isinstance(value, collections.Callable):
            return value()

        if not isinstance(value, six.string_types):
            return None

        # Attempt to parse a datetime:
        if dateutil:
            try:
                return dateutil.parser.parse(value)
            except (TypeError, ValueError):
                return None

        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                return None
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try:  # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value,
                                                    '%Y-%m-%d %H:%M:%S')[:6], **kwargs)
        except ValueError:
            try:  # Try without seconds.
                return datetime.datetime(*time.strptime(value,
                                                        '%Y-%m-%d %H:%M')[:5], **kwargs)
            except ValueError:  # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value,
                                                            '%Y-%m-%d')[:3], **kwargs)
                except ValueError:
                    return None