Python logger module: error() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use logger.error().
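
Before the project examples, here is a minimal sketch of the standard-library pattern these snippets build on. The module name, file path and messages are illustrative only; each project below wraps its own logger object.

import logging

# Module-level logger: the usual shape behind the `logger` objects in the examples below.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

def read_config(path):
    try:
        with open(path) as f:
            return f.read()
    except OSError:
        # error() records the failure without raising; exc_info=True attaches the traceback.
        logger.error("Could not read config file %s", path, exc_info=True)
        return None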

Project: gimel    Author: Alephbet    | project source | file source
def preflight_checks():
    logger.info('checking aws credentials and region')
    if region() is None:
        logger.error('Region is not set up. please run aws configure')
        return False
    try:
        check_aws_credentials()
    except AttributeError:
        logger.error('AWS credentials not found. please run aws configure')
        return False
    logger.info('testing redis')
    try:
        from gimel import _redis
        _redis().ping()
    except redis.exceptions.ConnectionError:
        logger.error('Redis ping failed. Please run gimel configure')
        return False
    return True
Project: appetite    Author: Bridgewater    | project source | file source
def get_commit_log(self):
        """Get the current commit log
        """
        try:
            log_object = {}
            for key, value in COMMIT_KEYS.items():
                stdout, _rc = helpers.run(['git', 'log', '-1', '--pretty=\'%s\'' % value],
                                          self.paths['repo_path'],
                                          self.dryrun)

                output = "XXXXX" if self.dryrun else helpers.filter_content(stdout)
                if key in consts.RENAME_COMMIT_LOG_KEYS:
                    key = consts.RENAME_COMMIT_LOG_KEYS[key]
                log_object[key] = output

            log_object['project'] = self.project
            log_object['reponame'] = self.reponame

            return log_object
        except Exception as e:
            logger.errorout("get_commit_log", error="Problem getting commit log",
                            error_msg=e.message, track=self.track)
Project: appetite    Author: Bridgewater    | project source | file source
def get_template_content(path):
    """Read either yml or json files and store them as dict"""
    template_dict = {}

    _filename, file_extension = os.path.splitext(path)
    file_extension = file_extension.replace('.', '')
    if file_extension in consts.TEMPLATING_EXTS:
        try:
            template_content = {}
            abs_path = os.path.abspath(os.path.expandvars(path))
            with open(abs_path, 'r') as stream:
                if file_extension in consts.JSON_EXTS:
                    template_content = json.load(stream) #nosec
                elif file_extension in consts.YMAL_EXTS:
                    template_content = yaml.safe_load(stream) #nosec
            template_dict.update(template_content)
        except Exception as e:
            logger.errorout("Error reading templating file",
                            file=path, error=e.message)
    else:
        logger.errorout("No templating file found",
                        file=path)

    return template_dict
Project: appetite    Author: Bridgewater    | project source | file source
def run(cmd, working_dir=None, dry_run=False):
    """Runs local cmd command"""

    cmd_split = shlex.split(cmd) if isinstance(cmd, basestring) else cmd

    if dry_run:
        return " ".join(cmd_split), 0

    try:
        p = Popen(cmd_split, shell=False, stderr=STDOUT, stdout=PIPE, cwd=working_dir)

        communicate = p.communicate()

        return communicate[0].strip(), p.returncode
    except OSError as e:
        logger.errorout("Run OSError", error=e.message)
    except: # pylint: disable=bare-except
        logger.errorout("Run Error")

    return
Project: gimel    Author: Alephbet    | project source | file source
def rollback_lambda(name, alias=LIVE):
    all_versions = _versions(name)
    live_version = _get_version(name, alias)
    try:
        live_index = all_versions.index(live_version)
        if live_index < 1:
            raise RuntimeError('Cannot find previous version')
        prev_version = all_versions[live_index - 1]
        logger.info('rolling back to version {}'.format(prev_version))
        _function_alias(name, prev_version)
    except RuntimeError as error:
        logger.error('Unable to rollback. {}'.format(repr(error)))
Project: appetite    Author: Bridgewater    | project source | file source
def pull_repo(self, force=False):
        """Clone repo to specified dir.  Delete repo if it currently exist unless reuse.
        """
        try:
            helpers.create_path(self.paths['absolute_path'], True)

            if force:
                self.delete_repo()

            if not os.path.exists(self.paths['repo_path']):
                logger.info("Starting Repo Cloning", track=self.track)

                output, rc = helpers.run(
                    "git clone -b %s %s" % (self.branch, self.url),
                    self.paths['absolute_path'],
                    self.dryrun)

                if rc > 0:
                    self.delete_repo()
                    logger.error("Pulling_repo", error=output, path=self.paths['repo_path'])
                    return -1
                return 1
            else:
                return 0
        except Exception as e:
            logger.errorout("Pulling_repo", err_msg=e.message,
                            error="Error pulling repo", path=self.paths['repo_path'])
Project: appetite    Author: Bridgewater    | project source | file source
def set_commit_id(self, commit_id=None):
        """Checks out the commit id for the repo
        """
        checkout_id = commit_id if commit_id else self.branch

        # Already checked out
        if self.prev_commit == checkout_id:
            return True

        cmd = "git checkout {0}".format(checkout_id)
        output, rc = self.run_command(cmd)

        if rc > 0:
            # Corrupted checkout state, try to recover
            logger.warn("Possible corrupted checkout state", desc="Problem with checkout", error=output,
                        commit_id=checkout_id, path=self.paths['repo_path'],
                        cmd=cmd, track=self.track)

            # Want to guarantee that the branch is completely reset.
            git_reset_output, rc = self.run_command("git reset --hard {0}".format(checkout_id)) #pylint: disable=unused-variable

            if rc < 1:
                # Clean up git so there are no untracked files.
                self.run_command("git clean -fd")

        if rc > 0:
            logger.errorout("set_commit_id", desc="Problem setting commit id", error=output,
                            commit_id=checkout_id, path=self.paths['repo_path'],
                            cmd=cmd, track=self.track)

        self.prev_commit = checkout_id

        return True
Project: appetite    Author: Bridgewater    | project source | file source
def function_importer(mod_str): # pylint: disable=too-complex
    """Import Module from external source"""
    mod_split = mod_str.split(":")
    if len(mod_split) != 2:
        logger.error("Can not import function", mod=mod_str)
        return None

    mod_path = mod_split[0]
    funct_name = mod_split[1].split('.')

    path, filename = os.path.split(mod_path)
    mod_name, ext = os.path.splitext(filename) # pylint: disable=unused-variable
    mod = None

    # try to load precompiled in first if it exists
    if os.path.exists(os.path.join(path, mod_name)+'.pyc'):
        try:
            mod = imp.load_compiled(mod_name, mod_path)
        except: # pylint: disable=bare-except
            pass

    if os.path.exists(os.path.join(path, mod_name)+'.py'):
        try:
            mod = imp.load_source(mod_name, mod_path)
        except Exception as e:
            logger.error("No Class to import", mod=mod_str, error=e.message)

    # Pull function if embedded in classes
    for i, mod_part in enumerate(funct_name):
        if mod and hasattr(mod, mod_part):
            if i == len(funct_name) - 1:
                if len(funct_name) > 1:
                    return getattr(mod(), mod_part)
                return getattr(mod, mod_part)
            mod = getattr(mod, mod_part)

    logger.error("Function not valid/callable", mod=mod_str)
    return None
Project: appetite    Author: Bridgewater    | project source | file source
def cmd_check(cmd):
    """Basic check for redirection in command"""

    try:
        results = next((False for param in shlex.split(cmd)
                        for rparam in REDIRECT_COMMANDS
                        if rparam == param), True)

        if not results:
            logger.warning("Possible injection", cmd=cmd)
    except Exception as error:
        logger.warning("Possible injection/weirdness", cmd=cmd, error=error.message)
Project: appetite    Author: Bridgewater    | project source | file source
def template_directory(app_path, templating_values):
    """Template files

    Walks through all the files in a directory and templates any jinja2 values
    found.
    """

    if not check_path(app_path):
        logger.errorout("Can not copy location that does not exist",
                        path=app_path)

    tvalues = merge_templates(templating_values)

    for path, _dir, files in os.walk(app_path):
        # sort files so logs read better and easier to get status
        files.sort()
        j2_env = Environment(autoescape=True, loader=FileSystemLoader(path))
        for filename in files:
            # Should not template version file since it may have
            # regex commands that can break templating.
            if filename.startswith(consts.VERSIONS_FILENAME):
                continue

            file_path = os.path.join(path, filename)
            try:
                file_content = j2_env.get_template(filename).render(tvalues)

                with open(file_path, 'w') as f:
                    f.write(file_content)
            except Exception as e:
                logger.errorout('Error templating file', file=file_path, error=e.message)
Project: appetite    Author: Bridgewater    | project source | file source
def __exit__(self, type, value, tb): # pylint: disable=redefined-builtin
        """Exit RunSingleInstance class
        :return: None
        """
        try:
            if not self.__is_running:
                fcntl.lockf(self.__filelock, fcntl.LOCK_UN)
                self.__filelock.close()
                os.unlink(self.__lockfile)
        except Exception as err:
            logger.error("Error unlocking single instance file", error=err.message)
Project: appetite    Author: Bridgewater    | project source | file source
def get_std_error_from_channel(channel):
    """Get std Error from an existing channel"""
    stderr = ""
    # Make sure we read everything off the error buffer
    if channel.recv_stderr_ready():
        error_buff = channel.recv_stderr(1024)
        while error_buff:
            stderr += error_buff
            error_buff = channel.recv_stderr(1024)
    return stderr
Project: appetite    Author: Bridgewater    | project source | file source
def _error_check(err_msg, remote_file, hostname, function_name):
    """Generic error checker for communication"""

    if len(err_msg) > 0:
        error_msg = next((err for err in ERROR_MESSAGES if err in err_msg), "Communication Error")

        logger.error(error_msg,
                     function=function_name,
                     filename=remote_file,
                     hostname=hostname,
                     module=COMMAND_MODULE_CUSTOM)
Project: tvalacarta    Author: tvalacarta    | project source | file source
def download_channel(channel_name):
    logger.info("tvalacarta.core.updater download_channel('"+channel_name+"')")
    # Remote channel
    remote_channel_url , remote_version_url = get_channel_remote_url(channel_name)

    # Local channel
    local_channel_path , local_version_path , local_compiled_path = get_channel_local_path(channel_name)

    # Download the channel
    updated_channel_data = scrapertools.cachePage( remote_channel_url )
    try:
        outfile = open(local_channel_path,"w")
        outfile.write(updated_channel_data)
        outfile.flush()
        outfile.close()
        logger.info("tvalacarta.core.updater Grabado a " + local_channel_path)
    except:
        logger.info("tvalacarta.core.updater Error al grabar " + local_channel_path)
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )

    # Download the version (it may not exist)
    try:
        updated_version_data = scrapertools.cachePage( remote_version_url )
        outfile = open(local_version_path,"w")
        outfile.write(updated_version_data)
        outfile.flush()
        outfile.close()
        logger.info("tvalacarta.core.updater Grabado a " + local_version_path)
    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )

    if os.path.exists(local_compiled_path):
        os.remove(local_compiled_path)
Project: tvalacarta    Author: tvalacarta    | project source | file source
def getSiteCachePath(url):
    # Get the main domain of the URL
    dominio = urlparse.urlparse(url)[1]
    logger.debug("[scrapertools.py] dominio="+dominio)
    nombres = dominio.split(".")
    if len(nombres)>1:
        dominio = nombres[len(nombres)-2]+"."+nombres[len(nombres)-1]
    else:
        dominio = nombres[0]
    logger.debug("[scrapertools.py] dominio="+dominio)

    # Create a directory in the cache for addresses of that domain
    siteCachePath = os.path.join( CACHE_PATH , dominio )
    if not os.path.exists(CACHE_PATH):
        try:
            os.mkdir( CACHE_PATH )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+CACHE_PATH)

    if not os.path.exists(siteCachePath):
        try:
            os.mkdir( siteCachePath )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+siteCachePath)

    logger.debug("[scrapertools.py] siteCachePath="+siteCachePath)

    return siteCachePath
Project: QTAF    Author: Tencent    | project source | file source
def verify(msg,compare_object):
    '''Check the comparison object; if the comparison fails, log an Error (no Exception is raised) and return False, otherwise return True.

    :param compare_object: comparison object
    :type compare_object: By.CompareBase
    '''
    if compare_object.compare() != True: 
        logger.error(msg, extra={'actual':compare_object.Actual, 'expect':compare_object.Expect})
        return False
    else:
        return True
Project: QTAF    Author: Tencent    | project source | file source
def verifyTrue(message,actual):
    '''Verify that the actual value is True
    '''
    if not isinstance(actual,bool):
        raise TypeError("actual type %s is not a bool" % type(actual))
    if actual != True:
        logger.error(message, extra={'actual':actual, 'expect':True})
        return False
    return True
Project: QTAF    Author: Tencent    | project source | file source
def verifyTrueWait(message,actualfunc,actargs,timeout=10,interval=0.5):
    '''Poll actualfunc every interval seconds until it returns True; log an error if it still fails after timeout seconds.

       :param message: message to log on failure
       :param actualfunc: function that returns the actual value
       :param actargs: arguments passed to actualfunc
       :param timeout: timeout in seconds
       :param interval: polling interval in seconds
    '''
    result = _waitForCompareResult(actualfunc,actargs,True,timeout,interval)
    if result[0]==False:
        logger.error("%s[Timeout:?%d?????%d?]" % (message,timeout,result[1]), extra={'actual':result[2], 'expect':True})
Project: QTAF    Author: Tencent    | project source | file source
def verifyEqual(message,actual,expect):
    '''Verify that the actual value equals the expected value; log an error if they differ.

       :param message: message to log on failure
       :param actual: actual value
       :param expect: expected value
       :return: True or False
    '''
    if actual != expect:
        logger.error(message, extra={'actual':actual, 'expect':expect})
        return False
    return True
Project: QTAF    Author: Tencent    | project source | file source
def verifyEqualWait(message,actualfunc,actargs,expect,timeout=10,interval=0.5):
    '''Poll actualfunc every interval seconds until its return value equals expect; log an error if it still differs after timeout seconds.

       :param message: message to log on failure
       :param actualfunc: function that returns the actual value
       :param actargs: arguments passed to actualfunc
       :param expect: expected value
       :param timeout: timeout in seconds
       :param interval: polling interval in seconds
    '''
    result = _waitForCompareResult(actualfunc,actargs,expect,timeout,interval)
    if result[0]==False:
        logger.error("%s[Timeout:?%d?????%d?]" % (message,timeout,result[1]), extra={'actual':result[2], 'expect':expect})
Project: QTAF    Author: Tencent    | project source | file source
def verifyMatchWait(message,actualfunc,actargs,regexpect,timeout=10,interval=0.5):
    '''Poll actualfunc every interval seconds until its return value matches the regular expression regexpect; log an error if it still fails after timeout seconds.

       :param message: message to log on failure
       :param actualfunc: function that returns the actual value
       :param actargs: arguments passed to actualfunc
       :param regexpect: regular expression the actual value should match
       :param timeout: timeout in seconds
       :param interval: polling interval in seconds
       :return: True or False
    '''
    compareobj = lambda x:re.search(regexpect, x)!=None
    result = _waitForCompareResult(actualfunc,actargs,compareobj,timeout,interval)
    if result[0]==False:
        logger.error("%s[Timeout:?%d?????%d?]" % (message,timeout,result[1]), extra={'actual':result[2], 'expect':regexpect})
Project: QTAF    Author: Tencent    | project source | file source
def verifyCompareFunc(message,actual,comparefunc):
    '''Verify that passing the actual value to comparefunc returns True; log an error and return False otherwise.

        :param actual: actual value
        :type actual: tuple or dict or a single value
        :param comparefunc: comparison function that takes the actual value and returns True or False
        :return: True or False
    '''
    actret = _getFuncResult(comparefunc,actual)
    if actret != True:
        logger.error(message, extra={'actual':actret, 'expect':True})
        return False
    return True
Project: QTAF    Author: Tencent    | project source | file source
def verifyCompareFuncWait(message,actualfunc,actargs,comparefunc,timeout=10,interval=0.5):
    '''Poll every interval seconds, passing the return value of actualfunc to comparefunc, until comparefunc returns True; log an error if it still fails after timeout seconds.

       :param message: message to log on failure
       :param actualfunc: function that returns the actual value
       :param actargs: arguments passed to actualfunc
       :param comparefunc: comparison function that takes the actual value and returns True or False
       :param timeout: timeout in seconds
       :param interval: polling interval in seconds
    '''
    result = _waitForCompareResult(actualfunc,actargs,comparefunc,timeout,interval)
    if result[0]==False:
        logger.error("%s[Timeout:?%d?????%d?]" % (message,timeout,result[1]), extra={'actual':result[2], 'expect':True})
Project: QTAF    Author: Tencent    | project source | file source
def verifyPropertyWait(message,obj,prop_name,expect,timeout=10,interval=0.5):
    '''Poll obj.prop_name every interval seconds until it equals expect; log an error if it still differs after timeout seconds.

       :param message: message to log on failure
       :param obj: object whose property is checked
       :type prop_name: string
       :param prop_name: name of the property to check
       :param expect: expected property value
       :param timeout: timeout in seconds
       :param interval: polling interval in seconds
    '''
    result = _waitForCompareResult(_getObjProperty,{'obj':obj,'prop_name':prop_name},expect,timeout,interval)
    if result[0]==False:
        logger.error("%s[Timeout:?%d?????%d?]" % (message,timeout,result[1]), extra={'actual':result[2], 'expect':expect})
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def load_json(*args, **kwargs):
    if "object_hook" not in kwargs:
        kwargs["object_hook"] = to_utf8

    try:
        value = json.loads(*args, **kwargs)
    except:
        logger.error("**NO** se ha podido cargar el JSON")
        logger.error(traceback.format_exc())
        value = {}

    return value
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def dump_json(*args, **kwargs):
    if not kwargs:
        kwargs = {"indent": 4, "skipkeys": True, "sort_keys": True, "ensure_ascii": False}

    try:
        value = json.dumps(*args, **kwargs)
    except:
        logger.error("**NO** se ha podido cargar el JSON")
        logger.error(traceback.format_exc())
        value = ""
    return value
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def check_json_file(data, fname, dict_data):
    """
    Comprueba que si dict_data(conversion del fichero JSON a dict) no es un diccionario, se genere un fichero con
    data de nombre fname.bk.

    @param data: contenido del fichero fname
    @type data: str
    @param fname: nombre del fichero leido
    @type fname: str
    @param dict_data: nombre del diccionario
    @type dict_data: dict
    """
    logger.info()

    if not dict_data:
        logger.error("Error al cargar el json del fichero %s" % fname)

        if data != "":
            # a new file is created
            from core import filetools
            title = filetools.write("%s.bk" % fname, data)
            if title != "":
                logger.error("Ha habido un error al guardar el fichero: %s.bk" % fname)
            else:
                logger.debug("Se ha guardado una copia con el nombre: %s.bk" % fname)
        else:
            logger.debug("Está vacío el fichero: %s" % fname)
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def download_channel(channel_name):
    logger.info(channel_name)

    import channeltools
    remote_channel_url , remote_version_url = channeltools.get_channel_remote_url(channel_name)
    local_channel_path , local_version_path , local_compiled_path = channeltools.get_channel_local_path(channel_name)

    # Download the channel
    try:
        updated_channel_data = scrapertools.cachePage( remote_channel_url )
        outfile = open(local_channel_path,"wb")
        outfile.write(updated_channel_data)
        outfile.flush()
        outfile.close()
        logger.info("Grabado a " + local_channel_path)
    except:
        import traceback
        logger.error(traceback.format_exc())

    # Download the version (it may not exist)
    try:
        updated_version_data = scrapertools.cachePage( remote_version_url )
        outfile = open(local_version_path,"w")
        outfile.write(updated_version_data)
        outfile.flush()
        outfile.close()
        logger.info("Grabado a " + local_version_path)
    except:
        import traceback
        logger.error(traceback.format_exc())

    if os.path.exists(local_compiled_path):
        os.remove(local_compiled_path)

    from platformcode import platformtools
    platformtools.dialog_notification(channel_name+" actualizado", "Se ha descargado una nueva versión")
Project: plugin.video.streamondemand-pureita    Author: orione7    | project source | file source
def download_channel(channel_name):
    logger.info("streamondemand-pureita.core.updater download_channel('"+channel_name+"')")
    # Remote channel
    remote_channel_url , remote_version_url = get_channel_remote_url(channel_name)

    # Local channel
    local_channel_path , local_version_path , local_compiled_path = get_channel_local_path(channel_name)

    # Download the channel
    updated_channel_data = scrapertools.cache_page( remote_channel_url )
    try:
        outfile = open(local_channel_path,"w")
        outfile.write(updated_channel_data)
        outfile.flush()
        outfile.close()
        logger.info("streamondemand-pureita.core.updater Grabado a " + local_channel_path)
    except:
        logger.info("streamondemand-pureita.core.updater Error al grabar " + local_channel_path)
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )

    # Download the version (it may not exist)
    try:
        updated_version_data = scrapertools.cache_page( remote_version_url )
        outfile = open(local_version_path,"w")
        outfile.write(updated_version_data)
        outfile.flush()
        outfile.close()
        logger.info("streamondemand-pureita.core.updater Grabado a " + local_version_path)
    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )

    if os.path.exists(local_compiled_path):
        os.remove(local_compiled_path)
Project: plugin.video.streamondemand-pureita    Author: orione7    | project source | file source
def getSiteCachePath(url):
    # Get the main domain of the URL
    dominio = urlparse.urlparse(url)[1]
    logger.debug("[scrapertools.py] dominio="+dominio)
    nombres = dominio.split(".")
    if len(nombres)>1:
        dominio = nombres[len(nombres)-2]+"."+nombres[len(nombres)-1]
    else:
        dominio = nombres[0]
    logger.debug("[scrapertools.py] dominio="+dominio)

    # Create a directory in the cache for addresses of that domain
    siteCachePath = os.path.join( CACHE_PATH , dominio )
    if not os.path.exists(CACHE_PATH):
        try:
            os.mkdir( CACHE_PATH )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+CACHE_PATH)

    if not os.path.exists(siteCachePath):
        try:
            os.mkdir( siteCachePath )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+siteCachePath)

    logger.debug("[scrapertools.py] siteCachePath="+siteCachePath)

    return siteCachePath
Project: plugin.video.streamondemand-pureita    Author: orione7    | project source | file source
def mainlist(item):
    logger.info("[favoritos.py] mainlist")
    itemlist=[]

    # Build a list with the favorites entries
    if usingsamba(BOOKMARK_PATH):
        ficheros = samba.get_files(BOOKMARK_PATH)
    else:
        ficheros = os.listdir(BOOKMARK_PATH)

    # Sort the list by file name (order in which they were added)
    ficheros.sort()

    # Fill the list
    for fichero in ficheros:

        try:
            # Read the bookmark
            canal,titulo,thumbnail,plot,server,url,fulltitle = readbookmark(fichero)
            if canal=="":
                canal="favoritos"

            # Create the entry
            # extra carries the file name so it can be deleted later
            ## <-- Add fulltitle with the movie title
            itemlist.append( Item( channel=canal , action="play" , url=url , server=server, title=fulltitle, thumbnail=thumbnail, plot=plot, fanart=thumbnail, extra=os.path.join( BOOKMARK_PATH, fichero ), fulltitle=fulltitle, folder=False ))
        except:
            for line in sys.exc_info():
                logger.error( "%s" % line )

    return itemlist
Project: Hecatoncheir    Author: snaga    | project source | file source
def connect(self):
        if self.dbconn is None:
            log.info(_("Connecting the database."))
            try:
                self.dbdriver.connect()
            except DbProfilerException as e:
                log.error(_("Could not connect to the database."),
                          detail=e.source)
                log.error(_("Abort."))
                sys.exit(1)

            self.dbconn = self.dbdriver.conn
            log.info(_("Connected to the database."))
        return True
Project: Hecatoncheir    Author: snaga    | project source | file source
def _query_column_profile(self, column_names, query):
        """Common code shared by PostgreSQL/MySQL/Oracle/MSSQL profilers
        to collect column profiles of the table.

        Args:
          column_names(list): column names.
          query(str): a query string to be executed on each database.

        Returns:
          tuple: (num_rows, minmax, nulls)
                 minmax and nulls are dictionaries having column names as
                 the keys.
        """
        _minmax = {}
        _nulls = {}
        num_rows = None
        try:
            rs = self.dbdriver.q2rs(query)
            assert len(rs.resultset) == 1

            a = copy.copy(list(rs.resultset[0]))
            num_rows = a.pop(0)
            log.trace("_query_column_profile: rows %d" % num_rows)
            i = 0
            while len(a) > 0:
                nulls = a.pop(0)
                colmin = a.pop(0)
                colmax = a.pop(0)
                log.trace(("_query_column_profile: col %s %d %s %s" %
                          (column_names[i], nulls, colmin, colmax)))
                _minmax[column_names[i]] = [colmin, colmax]
                _nulls[column_names[i]] = nulls
                i += 1
        except QueryError as e:
            log.error(_("Could not get row count/num of "
                        "nulls/min/max values."),
                      detail=e.value, query=query)
            raise e

        log.trace("_query_column_profile: %s" % str(_minmax))
        return (num_rows, _minmax, _nulls)
Project: Hecatoncheir    Author: snaga    | project source | file source
def init(self):
        try:
            if os.path.exists(self.filename):
                log.info(_("The repository already exists."))
                return True
            self.__init_sqlite3(self.filename)
        except Exception as e:
            log.error(_("Could not create the repository."), detail=unicode(e))
            return False
        log.info(_("The repository has been initialized."))
        return True
Project: Hecatoncheir    Author: snaga    | project source | file source
def destroy(self):
        try:
            if os.path.exists(self.filename):
                os.unlink(self.filename)
        except Exception as e:
            log.error(_("Could not destroy the repository."),
                      detail=unicode(e))
            return False
        log.info(_("The repository has been destroyed."))
        return True
Project: Hecatoncheir    Author: snaga    | project source | file source
def set(self, data):
        try:
            cursor = self._conn.cursor()
            cursor.execute("DELETE FROM repo")
            self._conn.commit()
        except Exception as e:
            log.error(_("Could not initialize the repository."),
                      detail=unicode(e))
            return False

        for d in data:
            self.append_table(d)

        return True
Project: Hecatoncheir    Author: snaga    | project source | file source
def get_table_list(self, database_name=None, schema_name=None,
                       table_name=None):
        table_list = []

        cond = []
        if database_name:
            cond.append("database_name = '%s'" % database_name)
        if schema_name:
            cond.append("schema_name = '%s'" % schema_name)
        if table_name:
            cond.append("table_name = '%s'" % table_name)
        where = "WHERE (%s)" % " AND ".join(cond) if cond else ''

        query = """
SELECT DISTINCT database_name, schema_name, table_name
  FROM repo
{0}
 ORDER BY database_name, schema_name, table_name
""".format(where)

        log.trace("get_table_list: query = %s" % query)

        try:
            cursor = self._conn.cursor()
            for r in cursor.execute(query):
                table_list.append([r[0], r[1], r[2]])
        except Exception as e:
            log.error(_("Could not get data."), detail=unicode(e))
            return None

        return table_list
Project: Hecatoncheir    Author: snaga    | project source | file source
def validate_record(self, column_names, column_values):
        validated_count = 0
        failed_count = 0

        assert len(column_names) == len(column_values)

        # new record validator
        for label in self.record_validators:
            validator = self.record_validators[label]
            validated_count += 1
            try:
                if validator.validate(column_names, column_values) is False:
                    log.trace("VALIDATION FAILED: %s %s %s %s" %
                              (validator.label, unicode(validator.rule),
                               validator.column_names, unicode(column_values)))
                    self._column_counter.incr(validator.rule[0],
                                              validator.label)
                    failed_count += 1
                else:
                    log.trace("VALIDATION OK: %s %s %s %s" %
                              (validator.label, unicode(validator.rule),
                               validator.column_names, unicode(column_values)))
            except ValidationError as e:
                log.error(u'%s' % e.value)
                log.trace("VALIDATION FAILED: %s %s %s %s" %
                          (validator.label, unicode(validator.rule),
                           validator.column_names, unicode(column_values)))
                self._column_counter.incr(validator.rule[0], validator.label)
                failed_count += 1
                continue

        if failed_count > 0:
            return False
        return True
Project: Hecatoncheir    Author: snaga    | project source | file source
def validate_table(self, table_data):
        validated_count = 0
        failed_count = 0

        # Run statistics validators.
        for label in self.statistics_validators:
            validator = self.statistics_validators[label]
            log.info(_("Validating column statistics: %s") %
                     '; '.join(validator.rule))
            validated_count += 1
            try:
                res = validator.validate(table_data)
            except ValidationError as e:
                log.error(u'%s' % e.value)
                res = False

            if res is False:
                log.trace("VALIDATION FAILED: %s %s %s" %
                          (validator.label, unicode(validator.rule),
                           validator.column_names))
                self._column_counter.incr(validator.rule[0], validator.label)
                failed_count += 1
            else:
                log.trace("VALIDATION OK: %s %s %s" %
                          (validator.label, unicode(validator.rule),
                           validator.column_names))

        return (validated_count, failed_count)
Project: Hecatoncheir    Author: snaga    | project source | file source
def validate_sql(self, dbdriver):
        if dbdriver is None:
            raise DriverError(u'Database driver not found.')

        validated_count = 0
        failed_count = 0
        for label in self.sql_validators:
            validator = self.sql_validators[label]
            log.info(_("Validating with SQL: %s") % '; '.join(validator.rule))
            validated_count += 1

            try:
                res = validator.validate(dbdriver)
            except ValidationError as e:
                log.error(_("SQL validation error: %s") %
                          '; '.join(validator.rule),
                          detail=e.source.value if e.source else None)
                self._column_counter.incr(validator.rule[0], validator.label)
                failed_count += 1
                continue

            if res is False:
                self._column_counter.incr(validator.rule[0], validator.label)
                failed_count += 1

        return (validated_count, failed_count)
Project: Hecatoncheir    Author: snaga    | project source | file source
def export_file(filename, body):
    try:
        f = open(filename, "w")
        f.write(body.encode('utf-8'))
        f.close()
        log.info(_("Generated %s.") % filename)
    except IOError as e:
        log.error(_("Could not generate %s: %s") % (filename, unicode(e)))
        return False
    return True
Project: LeoMuFundPicker    Author: leodengyx    | project source | file source
def __save_mutual_fund_obj_strategy_portion(self, mutual_fund_inst, get_parameter_dict):

        logger.info(
            "__save_mutual_fund_obj_strategy_portion() function entry. {'get_parameter_dict': %s}" % get_parameter_dict)

        # Get mutual fund objective and strategy portion
        query_args = {"url": "http://financials.morningstar.com/fund/investObjAndStrategy.html?",
                              "t": get_parameter_dict["t"],
                              "region": get_parameter_dict["region"],
                              "culture": get_parameter_dict["culture"],
                              "cur": get_parameter_dict["cur"],
                              "productCode": get_parameter_dict["productCode"]}
        request = urllib2.Request(self.mutual_fund_info_url + "?" + urllib.urlencode(query_args))
        request.add_header("User-Agent",
                                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
        logger.debug("Http request: %s" % request.get_full_url())

        response = urllib2.urlopen(request)
        mutual_fund_info_obj_strategy_soup = BeautifulSoup(response.read(), "html.parser")

        # Save Objective and Strategy
        try:
            div_tag_list = mutual_fund_info_obj_strategy_soup.find_all("div")
            mutual_fund_inst.inve_objective_strategy = unicode(div_tag_list[1].string).lstrip().rstrip()
            logger.debug("Save fund objective and strategy: %s" % mutual_fund_inst.inve_objective_strategy)
        except:
            mutual_fund_inst.inve_objective_strategy = ""
            logger.error("Error reading Invest Objective Strategy of fund %s" % mutual_fund_inst.fund_name)
Project: pydwd    Author: ckaus    | project source | file source
def download_file(host, file_path, file_name):
    try:
        ftp = ftplib.FTP(host)
        ftp.login()
        ftp.cwd(file_path)
        ftp.retrbinary('RETR ' + file_path + file_name, open(file_name, 'wb').write)
        logger.success('Download: %s' % host + file_path + file_name)
        ftp.quit()
    except ftplib.all_errors as e:
        logger.error('%s\nCannot download file: %s.' % (e, host + file_path + file_name))
Project: pydwd    Author: ckaus    | project source | file source
def get_response(url):
    try:
        return urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError) as error:
        raise error
    except Exception as error:
        logger.error('Exception: %s %s' % (traceback.format_exc(), error))
Project: appetite    Author: Bridgewater    | project source | file source
def get_ssh_client(hostname, ssh_hostname):
        """Tries to create ssh client

        Create ssh client based on the username and ssh key
        """

        if not CREDS.SSH_KEYFILE:
            logger.errorout("ssh_keyfile not set",
                            module=COMMAND_MODULE_CUSTOM)

        retries = 0

        while retries < MAX_SSH_RETRIES:
            try:
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

                ssh.connect(hostname=ssh_hostname,
                            username=CREDS.SSH_USER,
                            port=CREDS.SSH_PORT,
                            pkey=CREDS.PK,
                            timeout=CONNECTION_TIMEOUT)

                return ssh
            except paramiko.BadAuthenticationType:
                logger.error("BadAuthenticationType",
                             hostname=hostname,
                             module=COMMAND_MODULE_CUSTOM)
                return
            except paramiko.AuthenticationException:
                logger.error("Authentication failed",
                             hostname=hostname,
                             module=COMMAND_MODULE_CUSTOM)
                return
            except paramiko.BadHostKeyException:
                logger.error("BadHostKeyException",
                             fix="Edit known_hosts file to remove the entry",
                             hostname=hostname,
                             module=COMMAND_MODULE_CUSTOM)
                return
            except paramiko.SSHException:
                logger.error("SSHException",
                             hostname=hostname,
                             module=COMMAND_MODULE_CUSTOM)
                return
            except Exception as e:
                if retries == 0:
                    logger.error("Problems connecting to host",
                                 hostname=hostname,
                                 module=COMMAND_MODULE_CUSTOM,
                                 error=e.message)
                retries += 1
                time.sleep(1)

        logger.error("Can not connect to host",
                     hostname=hostname,
                     module=COMMAND_MODULE_CUSTOM)

        return None
Project: tvalacarta    Author: tvalacarta    | project source | file source
def downloadIfNotModifiedSince(url,timestamp):

    logger.info("tvalacarta.core.downloadtools downloadIfNotModifiedSince("+url+","+time.ctime(timestamp)+")")

    # Convert the date to GMT
    fechaFormateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
    logger.info("fechaFormateada=%s" % fechaFormateada)

    # Check whether it has changed
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('If-Modified-Since', fechaFormateada)
    req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')

    updated = False

    try:
        response = urllib2.urlopen(req)
        data = response.read()
        #info = response.info()
        #logger.info( info.headers )

        # If we get this far, it has changed
        updated = True
        response.close()

    except urllib2.URLError,e:
        # If it returns 304, it has not changed
        if hasattr(e,'code'):
            logger.info("Codigo de respuesta HTTP : %d" %e.code)
            if e.code == 304:
                logger.info("No ha cambiado")
                updated = False
        # Catch errors that carry a response code from the requested external server
        else:
            for line in sys.exc_info():
                logger.error( "%s" % line )
        data=""

    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin-inicio+1))

    return updated,data

# Download history
Project: tvalacarta    Author: tvalacarta    | project source | file source
def unescape(text):
    """Removes HTML or XML character references 
       and entities from a text string.
       keep &amp;, &gt;, &lt; in the source code.
    from Fredrik Lundh
    http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":   
                    return unichr(int(text[3:-1], 16)).encode("utf-8")
                else:
                    return unichr(int(text[2:-1])).encode("utf-8")

            except ValueError:
                logger.info("error de valor")
                pass
        else:
            # named entity
            try:
                '''
                if text[1:-1] == "amp":
                    text = "&amp;amp;"
                elif text[1:-1] == "gt":
                    text = "&amp;gt;"
                elif text[1:-1] == "lt":
                    text = "&amp;lt;"
                else:
                    print text[1:-1]
                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
                '''
                import htmlentitydefs
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
            except KeyError:
                logger.info("keyerror")
                pass
            except:
                pass
        return text # leave as is
    return re.sub("&#?\w+;", fixup, text)

    # Converts HTML codes like "&ntilde;" and replaces them with the unicode utf-8 character "ñ"
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def update_json_data(dict_node, name_file, node, path=None):
    """
    actualiza el json_data de un fichero con el diccionario pasado

    @param dict_node: diccionario con el nodo
    @type dict_node: dict
    @param name_file: Puede ser el nombre de un canal o server (sin incluir extension)
     o bien el nombre de un archivo json (con extension)
    @type name_file: str
    @param node: nodo a actualizar
    @param path: Ruta base del archivo json. Por defecto la ruta de settings_channels.
    @return result: Devuelve True si se ha escrito correctamente o False si ha dado un error
    @rtype: bool
    @return json_data
    @rtype: dict
    """
    logger.info()

    from core import config
    from core import filetools
    json_data = {}
    result = False

    if not name_file.endswith(".json"):
        name_file += "_data.json"

    if not path:
        path = filetools.join(config.get_data_path(), "settings_channels")

    fname = filetools.join(path, name_file)

    try:
        data = filetools.read(fname)
        dict_data = load_json(data)
        # it is a dict
        if dict_data:
            if node in dict_data:
                logger.debug("   existe el key %s" % node)
                dict_data[node] = dict_node
            else:
                logger.debug("   NO existe el key %s" % node)
                new_dict = {node: dict_node}
                dict_data.update(new_dict)
        else:
            logger.debug("   NO es un dict")
            dict_data = {node: dict_node}
        json_data = dump_json(dict_data)
        result = filetools.write(fname, json_data)
    except:
        logger.error("No se ha podido actualizar %s" % fname)

    return result, json_data
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def downloadbest(video_urls, title, continuar=False):
    logger.info()

    # Reverse it so the highest quality comes first (list() makes a copy)
    invertida = list(video_urls)
    invertida.reverse()

    for elemento in invertida:
        # videotitle = elemento[0]
        url = elemento[1]
        logger.info("Descargando opción " + title + " " + url.encode('ascii', 'ignore'))

        # Work out the file it should write to
        try:
            fullpath = getfilefromtitle(url, title.strip())
        # If it fails, the URL is useless
        except:
            import traceback
            logger.error(traceback.format_exc())
            continue

        # Download
        try:
            ret = downloadfile(url, fullpath, continuar=continuar)
        # If we get here, it is usually a timeout
        except urllib2.URLError, e:
            import traceback
            logger.error(traceback.format_exc())
            ret = -2

        # The user cancelled the download
        if ret == -1:
            return -1
        else:
            # The file does not even exist
            if not os.path.exists(fullpath):
                logger.info("-> No ha descargado nada, probando con la siguiente opción si existe")
            # The file exists
            else:
                tamanyo = os.path.getsize(fullpath)

                # It has size 0
                if tamanyo == 0:
                    logger.info("-> Descargado un fichero con tamaño 0, probando con la siguiente opción si existe")
                    os.remove(fullpath)
                else:
                    logger.info("-> Descargado un fichero con tamaño %d, lo da por bueno" % tamanyo)
                    return 0

    return -2
Project: pelisalacarta-ce    Author: pelisalacarta-ce    | project source | file source
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
        logger.info("file=%s" % file)
        logger.info("dir=%s" % dir)

        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)

        zf = zipfile.ZipFile(file)
        if not folder_to_extract:
            self._createstructure(file, dir)
        num_files = len(zf.namelist())

        for name in zf.namelist():
            logger.info("name=%s" % name)
            if not name.endswith('/'):
                logger.info("no es un directorio")
                try:
                    (path,filename) = os.path.split(os.path.join(dir, name))
                    logger.info("path=%s" % path)
                    logger.info("name=%s" % name)
                    if folder_to_extract:
                        if path != os.path.join(dir, folder_to_extract):
                            break
                    else:
                        os.makedirs( path )
                except:
                    pass
                if folder_to_extract:
                    outfilename = os.path.join(dir, filename)

                else:
                    outfilename = os.path.join(dir, name)
                logger.info("outfilename=%s" % outfilename)
                try:
                    if os.path.exists(outfilename) and overwrite_question:
                        from platformcode import platformtools
                        dyesno = platformtools.dialog_yesno("El archivo ya existe",
                                                            "El archivo %s a descomprimir ya existe" \
                                                            ", ¿desea sobrescribirlo?" \
                                                            % os.path.basename(outfilename))
                        if not dyesno:
                            break
                        if backup:
                            import time
                            import shutil
                            hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
                            backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
                            if not os.path.exists(backup):
                                os.makedirs(backup)
                            shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename)))

                    outfile = open(outfilename, 'wb')
                    outfile.write(zf.read(name))
                except:
                    logger.error("Error en fichero "+name)