Python re module: sub() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use re.sub().
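Before the project examples, a minimal refresher on the three re.sub() call styles that recur below (plain replacement, backreference, and callable replacement):

import re

print(re.sub(r'\s+', ' ', 'a  b\tc'))                            # -> 'a b c'
print(re.sub(r'(\w+)=(\w+)', r'\2:\1', 'x=1'))                   # -> '1:x'
print(re.sub(r'[aeiou]', lambda m: m.group(0).upper(), 'regex')) # -> 'rEgEx'
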

Project: pycos    Author: pgiri    | Project source | File source
def __init__(self, node, platform='', cpus=0, memory=0, disk=0):
        if node.find('*') < 0:
            try:
                info = socket.getaddrinfo(node, None)[0]
                ip_addr = info[4][0]
                if info[0] == socket.AF_INET6:
                    ip_addr = re.sub(r'^0*', '', ip_addr)
                    ip_addr = re.sub(r':0*', ':', ip_addr)
                    ip_addr = re.sub(r'::+', '::', ip_addr)
                node = ip_addr
            except:
                node = ''

        if node:
            self.ip_rex = node.replace('.', '\\.').replace('*', '.*')
        else:
            logger.warning('node "%s" is invalid', node)
            self.ip_rex = ''
        self.platform = platform.lower()
        self.cpus = cpus
        self.memory = memory
        self.disk = disk
Project: my-first-blog    Author: AnkurBegining    | Project source | File source
def _glob_to_re(self, pattern):
        """Translate a shell-like glob pattern to a regular expression.

        Return a string containing the regex.  Differs from
        'fnmatch.translate()' in that '*' does not match "special characters"
        (which are platform-specific).
        """
        pattern_re = fnmatch.translate(pattern)

        # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
        # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
        # and by extension they shouldn't match such "special characters" under
        # any OS.  So change all non-escaped dots in the RE to match any
        # character except the special characters (currently: just os.sep).
        sep = os.sep
        if os.sep == '\\':
            # we're using a regex to manipulate a regex, so we need
            # to escape the backslash twice
            sep = r'\\\\'
        escaped = r'\1[^%s]' % sep
        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
        return pattern_re
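The dot-rewriting substitution can be checked in isolation; this small sketch assumes a Unix os.sep of '/':

import re

# Non-escaped dots become [^/]; an escaped \. is left alone
pattern_re = r'.*\.py'  # e.g. a fragment of an fnmatch translation
print(re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', pattern_re))  # -> '[^/]*\.py'
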
Project: HatDecrypter    Author: HatBashBR    | Project source | File source
def decrypt(hash, tipo):
    global word

    try:
        if(tipo == 0):
            url = BeautifulSoup(urllib.urlopen("https://md5.gromweb.com/?md5=" + hash), "html.parser")
        else:
            url = BeautifulSoup(urllib.urlopen("https://sha1.gromweb.com/?hash=" + hash), "html.parser")

        password = url.find("em", {"class": "long-content string"})
        password = re.sub(re.compile("<.*?>"), "", str(password)).strip()
        if str(password) == "None":
            print word+"\t\t\t\t[-] Senha nao encontrada! :-("
        else:
            print word+"\t\t\t\t[+] Senha encontrada: " + password
    except IOError:
       decryptwl(hash, tipo)
Project: socialhome    Author: jaywink    | Project source | File source
def safe_text_for_markdown(text):
    """Clean the text using bleach but keep certain Markdown sections.

    Markdown code, i.e. ` or ``` combos. For single `, do not allow line breaks between the tags.
    Quotes, i.e. '> ', which bleach would otherwise clean up.
    """
    code_blocks, text = code_blocks_add_markers(text)
    # Store quotes next
    text = re.sub(r"(^> )", "%%safe_quote_in_start%%", text)
    text = re.sub(r"(\n> )", "%%safe_quote_in_new_line%%", text, flags=re.DOTALL)
    # Nuke all html, scripts, etc
    text = bleach.clean(text or "")
    # Return quotes
    text = text.replace("%%safe_quote_in_start%%", "> ")
    text = text.replace("%%safe_quote_in_new_line%%", "\n> ")
    text = code_blocks_restore(code_blocks, text)
    return text
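The marker round trip can be exercised without bleach; a minimal sketch using the same placeholder strings:

import re

text = "> quoted line\nplain line\n> another quote"
protected = re.sub(r"(^> )", "%%safe_quote_in_start%%", text)
protected = re.sub(r"(\n> )", "%%safe_quote_in_new_line%%", protected)
# ... HTML cleaning would happen here ...
restored = protected.replace("%%safe_quote_in_start%%", "> ")
restored = restored.replace("%%safe_quote_in_new_line%%", "\n> ")
assert restored == text
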
Project: myhdlpeek    Author: xesscorp    | Project source | File source
def _clean_names(cls):
        '''
        Remove indices from non-repeated peeker names that don't need them.

        When created, all peekers get an index appended to their name to
        disambiguate any repeated names. If the name isn't actually repeated,
        then the index is removed.
        '''

        index_re = r'\[\d+\]$'
        # Iterate over a copy, since entries may be re-keyed inside the loop
        for name, peeker in list(cls._peekers.items()):
            if not peeker.name_dup:
                new_name = re.sub(index_re, '', name)
                if new_name != name:
                    peeker.trace.name = new_name
                    cls._peekers[new_name] = cls._peekers.pop(name)
Project: newsreap    Author: caronc    | Project source | File source
def parse_article(self, subject, *args, **kwargs):
        """
        Takes an article header and returns its parsed content if parsing
        succeeds. Otherwise it returns None.
        """

        matched = NZB_SUBJECT_PARSE.match(subject)
        if matched is None:
            # subject is not parsable
            return None

        results = {}

        # Trim results
        if matched.group('desc') is not None:
            results['desc'] = re.sub(r'[\s-]+$', '', matched.group('desc'))
        if matched.group('fname') is not None:
            results['fname'] = matched.group('fname').strip()

        # Support conversion of integers
        for _attr in ['index', 'count', 'yindex', 'ycount', 'size']:
            if matched.group(_attr) is not None:
                results[_attr] = int(matched.group(_attr))

        return results
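Since NZB_SUBJECT_PARSE is defined elsewhere, only the trimming substitution is shown standalone:

import re

# Trailing whitespace and dashes are stripped from the description
print(re.sub(r'[\s-]+$', '', 'Some.File.Name - '))  # -> 'Some.File.Name'
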
Project: newsreap    Author: caronc    | Project source | File source
def __fmt_key(self, key):
        """Formats the hash key for more consistent hits; hence fetching the
        'Message-ID' key should still be fetched even if the user indexes
        with 'message-id'.
        """
        def _fmt(_k):
            return _k.group(1) + _k.group(2).upper()

        if not isinstance(key, basestring):
            # Handle invalid key entries types
            key = str(key)

        key = re.sub(
            # Flip -id to ID (short for Identifier)
            # Flip -crc to CRC (short for Cyclic Redundancy Check)
            r'([_-])((id|crc)([^a-z0-9]|$))',
            _fmt,
            re.sub(r'(^|\s|[_-])(\S)', _fmt, key.strip().lower()),
            flags=re.IGNORECASE,
        )
        if key in VALID_HEADER_ENTRIES or key.startswith(UNKNOWN_PREFIX):
            return key
        return UNKNOWN_PREFIX + key
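Tracing the two nested substitutions on a plain string (VALID_HEADER_ENTRIES and UNKNOWN_PREFIX are defined elsewhere) shows the normalization:

import re

def _fmt(_k):
    return _k.group(1) + _k.group(2).upper()

key = 'message-id'
step1 = re.sub(r'(^|\s|[_-])(\S)', _fmt, key.strip().lower())  # 'Message-Id'
step2 = re.sub(r'([_-])((id|crc)([^a-z0-9]|$))', _fmt, step1,
               flags=re.IGNORECASE)                            # 'Message-ID'
print(step2)
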
Project: xr-telemetry-m2m-web    Author: cisco    | Project source | File source
def process_reply(reply, nested=False):
    """
    Process a reply so it looks nice:

    - if it's from the prototype yang integration, ditch the 'data' root
    - convert from list to nested format if requested
    - convert quotes to avoid escaping
    """
    try:
        # @@@ strip 'data' from yang output
        reply['result'] = reply['result'][0]['data'] 
    except Exception:
        pass

    # If required, and query successful, convert the reply['result'] format.
    try:
        if nested:
            reply['result'] = reformat(reply['result'])
    except KeyError:
        # Fail silently if there is no 'result' key in the reply; this
        # means an error occurred.
        pass

    # @@@ cheesily try to avoid \" everywhere, at cost of valid json
    return re.sub(r'\\"', "'", json.dumps(reply))
Project: xr-telemetry-m2m-web    Author: cisco    | Project source | File source
def handle_misses(self, request, mapper, map_misses):
        self.num_misses = len(map_misses)
        self.found_values = []
        for i, miss in enumerate(map_misses):
            progress_indicator(
                request,
                "Incomplete mapping from cache, calculating remainder [{}/{}]..."
                    .format(i + 1, self.num_misses))
            print('### FETCHING MISS {}/{}: {}'.format(i, self.num_misses, miss))
            d = scrape.schema_describe('config ' + miss, request.sdata)
            def got_miss_reply(result, key):
                result = [re.sub("'", '"', path) for path in result]
                if len(result) == 0:
                    result = None
                print('### GOT RESULT ({} remaining, {} saved) {} -> {}'.format(
                        self.num_misses, len(self.found_values), key, result))
                self.found_values.append((key, result))
                self.num_misses -= 1
                if self.num_misses == 0:
                    # Got everything!
                    mapper.populate_cache(self.found_values)
                    self.attempt_map(request, second_attempt=True)
            d.addCallback(got_miss_reply, miss)
Project: python-    Author: secondtonone1    | Project source | File source
def matches(self, testString, parseAll=True):
        """
        Method for quick testing of a parser against a test string. Good for simple 
        inline microtests of sub expressions while building up larger parser.

        Parameters:
         - testString - to test against this expression for a match
         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests

        Example::
            expr = Word(nums)
            assert expr.matches("100")
        """
        try:
            self.parseString(_ustr(testString), parseAll=parseAll)
            return True
        except ParseBaseException:
            return False
Project: python-    Author: secondtonone1    | Project source | File source
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return zipimporter cache entry keys related to a given normalized path.

    Alternative path spellings (e.g. those using different character case or
    those using alternative path separators) related to the same path are
    included. Any sub-path entries are included as well, i.e. those
    corresponding to zip archives embedded in other zip archives.

    """
    result = []
    prefix_len = len(normalized_path)
    for p in cache:
        np = normalize_path(p)
        if (np.startswith(normalized_path) and
                np[prefix_len:prefix_len + 1] in (os.sep, '')):
            result.append(p)
    return result
Project: ochem_predict_nn    Author: connorcoley    | Project source | File source
def reassign_atom_mapping(transform):
    '''This function takes an atom-mapped reaction and reassigns 
    the atom-mapping labels (numbers) from left to right, once 
    that transform has been canonicalized.'''

    all_labels = re.findall(r'\:([0-9]+)\]', transform)

    # Define list of replacements which matches all_labels *IN ORDER*
    replacements = []
    replacement_dict = {}
    counter = 1
    for label in all_labels: # keep in order! this is important
        if label not in replacement_dict:
            replacement_dict[label] = str(counter)
            counter += 1
        replacements.append(replacement_dict[label])

    # Perform replacements in order
    transform_newmaps = re.sub(r'\:[0-9]+\]',
        lambda match: (':' + replacements.pop(0) + ']'),
        transform)

    return transform_newmaps
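The pop-from-the-front callable is the interesting trick; the same pattern on a toy mapped fragment:

import re

replacements = ['1', '2', '1']  # precomputed, in match order
print(re.sub(r'\:[0-9]+\]',
             lambda match: ':' + replacements.pop(0) + ']',
             '[C:7][N:3][C:7]'))
# -> '[C:1][N:2][C:1]', exactly what reassign_atom_mapping produces here
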
Project: PyWallet    Author: AndreMiras    | Project source | File source
def select_overview_subtab(self):
        """
        Selects the overview sub tab.
        """
        # this is what we would normally do:
        # tab_manager.current = 'overview'
        # but instead we need to simulate the click on the
        # navigation bar children or the associated screen button
        # would not have the selected color
        overview_bnavigation = self.overview_bnavigation
        navigation_bar = overview_bnavigation.children[0]
        boxlayout = navigation_bar.children[0]
        nav_headers = boxlayout.children
        # the overview is the first/last button
        overview_nav_header = nav_headers[-1]
        overview_nav_header.dispatch('on_press')
Project: python-opilo-script    Author: SEEDTEAM    | Project source | File source
def sms():
    if len(sys.argv) < 2:
        print(help_text)
        return
    if sys.argv[1] == "send":
        if len(sys.argv) < 3:
            print("????? ????? ?? ?? ???.")
            return
        if not re.match(r"[\+98|0]9[0-9]*",sys.argv[2]):
            print("????? ???? ??? ?????? ???.tetete")
            return
        number = sys.argv[2]
        if re.match(sys.argv[2], r"^\+98"):
            number = re.sub("+98", "0", number)
        text = sys.argv[3]
        if len(text) > 100:
            print("????? ??????? ??? ??? ????? ???? ???.")
            return
        send_sms(number, text, str(time.time()))
        return
    if sys.argv[1] == "credits":
        get_credits()
        return
Project: tailbiter    Author: darius    | Project source | File source
def expand(chunks, name, version):
    "Return the named chunk with any chunk-references recursively expanded."
    template, latest = '', -1
    for v in range(version+1):
        t = chunks.get(name + ' v%d+' % v, '')
        if t:
            latest = v
            template += t
    for v in range(version, latest, -1):
        t = chunks.get(name + ' v%d' % v, '')
        if t:
            template += t
            break
    if not template:
        template = chunks[name]
    return chunk_ref_pattern.sub(
        lambda mo: indent(mo.group(1), expand(chunks, mo.group(2), version)),
        template)
Project: tashaphyne    Author: linuxscout    | Project source | File source
def strip_harakat(text):
    """Strip Harakat from arabic word except Shadda.
    The stripped marks are:
        - FATHA,  DAMMA,  KASRA
        - SUKUN
        - FATHATAN,  DAMMATAN,  KASRATAN,  ,  ,  .
    Example:
        >>> text = u"?????????????"
        >>> strip_harakat(text)
        ????????
    @param text: arabic text.
    @type text: unicode.
    @return: return a stripped text.
    @rtype: unicode.
    """
    # if text:
        # return  re.sub(HARAKAT_PATTERN, u'', text)
    # return text
    if not text:
        return text
    elif is_vocalized(text): 
        for char in HARAKAT:
            text  = text.replace(char, '')
    return text
Project: tashaphyne    Author: linuxscout    | Project source | File source
def strip_lastharaka(text):
    """Strip the last Haraka from arabic word except Shadda.
    The stripped marks are:
        - FATHA,  DAMMA,  KASRA
        - SUKUN
        - FATHATAN,  DAMMATAN,  KASRATAN,  ,  ,  .
    Example:
        >>> text = u"?????????????"
        >>> strip_lastharaka(text)
        ????????????
    @param text: arabic text.
    @type text: unicode.
    @return: return a stripped text.
    @rtype: unicode.
    """
    if text:
        if is_vocalized(text):
            return  re.sub(LASTHARAKA_PATTERN, u'', text)
    return text
Project: tashaphyne    Author: linuxscout    | Project source | File source
def normalize_ligature(text):
    """Normalize Lam Alef ligatures into two letters (LAM and ALEF), 
     and Tand return a result text.
    Some systems present lamAlef ligature as a single letter,  
    this function convert it into two letters, 
    The converted letters into  LAM and ALEF are :
        - LAM_ALEF,  LAM_ALEF_HAMZA_ABOVE,  LAM_ALEF_HAMZA_BELOW,  LAM_ALEF_MADDA_ABOVE
    Example:
        >>> text = u"????? ???? ???????"
        >>> normalize_ligature(text)
        ????? ???? ???????

    @param text: arabic text.
    @type text: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    if text:
        return LIGUATURES_PATTERN.sub(u'%s%s'%(LAM, ALEF),  text)
    return text
Project: tashaphyne    Author: linuxscout    | Project source | File source
def normalize_hamza(word):
    """Standardize the Hamzat into one form of hamza, 
    replace Madda by hamza and alef.
    Replace the LamAlefs by simplified letters.
    Example:
        >>> text = u"??? ??? ??????"
        >>> normalize_hamza(text)
        ??? ??? ??????

    @param word: arabic text.
    @type word: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    if word.startswith(ALEF_MADDA):
        if len(word)>= 3 and (word[1] not in HARAKAT) and \
                     (word[2] == SHADDA or len(word) == 3):
            word = HAMZA + ALEF + word[1:]
        else:
            word = HAMZA + HAMZA + word[1:]
    # convert all Hamza from into one form
    word = word.replace(ALEF_MADDA, HAMZA+HAMZA)
    word = HAMZAT_PATTERN.sub(HAMZA, word)
    return word
Project: tashaphyne    Author: linuxscout    | Project source | File source
def tokenize(text = u""):
    """
    Tokenize text into words
    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    if text == u'':
        return []
    else:
        #split tokens
        mylist = TOKEN_PATTERN.split(text)
        # don't remove newline \n
        mylist = [TOKEN_REPLACE.sub('',x) for x in mylist if x]            
        # remove empty substring
        mylist = [x for x in mylist if x]
        return mylist
Project: tashaphyne    Author: linuxscout    | Project source | File source
def strip_tashkeel(text):
    """Strip vowel from a text and return a result text.
    The striped marks are :
        - FATHA, DAMMA, KASRA
        - SUKUN
        - SHADDA
        - FATHATAN, DAMMATAN, KASRATAN, , , .
    Example:
        >>> text=u"?????????????"
        >>> strip_tashkeel(text)
        ???????

    @param text: arabic text.
    @type text: unicode.
    @return: return a stripped text.
    @rtype: unicode.
    """
    return arabconst.HARAKAT_PAT.sub('', text)


#strip tatweel from a text and return a result text
#--------------------------------------
Project: tashaphyne    Author: linuxscout    | Project source | File source
def strip_tatweel(text):
    """
    Strip tatweel from a text and return a result text.

    Example:
        >>> text=u"????????????"
        >>> strip_tatweel(text)
        ???????

    @param text: arabic text.
    @type text: unicode.
    @return: return a stripped text.
    @rtype: unicode.
    """
    return re.sub(u'[%s]' % arabconst.TATWEEL, '', text)
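arabconst.TATWEEL is the Arabic elongation character U+0640, so the character-class substitution can be checked standalone:

import re

TATWEEL = u'\u0640'  # ARABIC TATWEEL, the elongation character
word = u'\u0639\u0640\u0640\u0631\u0628\u064a'  # a word padded with two tatweels
print(re.sub(u'[%s]' % TATWEEL, '', word) == u'\u0639\u0631\u0628\u064a')  # True
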


#--------------------------------------
Project: tashaphyne    Author: linuxscout    | Project source | File source
def normalize_hamza(text):
    """Normalize Hamza forms into one form, and return a result text.
    The converted letters are:
        - The letters converted into HAMZA are: WAW_HAMZA, YEH_HAMZA
        - The letters converted into ALEF are: ALEF_MADDA,
        ALEF_HAMZA_ABOVE, ALEF_HAMZA_BELOW, HAMZA_ABOVE, HAMZA_BELOW

    Example:
        >>> text=u"?????? ?? ??????"
        >>> normalize_hamza(text)
        ?????? ?? ??????

    @param text: arabic text.
    @type text: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    text = arabconst.ALEFAT_PAT.sub(arabconst.ALEF, text)
    return arabconst.HAMZAT_PAT.sub(arabconst.HAMZA, text)

#--------------------------------------
Project: tashaphyne    Author: linuxscout    | Project source | File source
def normalize_lamalef(text):
    """Normalize Lam Alef ligatures into two letters (LAM and ALEF),
    and return a result text.
    Some systems present lamAlef ligature as a single letter,
    this function convert it into two letters,
    The converted letters into  LAM and ALEF are :
        - LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW,
         LAM_ALEF_MADDA_ABOVE

    Example:
        >>> text=u"????? ???? ???????"
        >>> normalize_lamalef(text)
        ????? ???? ???????

    @param text: arabic text.
    @type text: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    return arabconst.LAMALEFAT_PAT.sub(\
      u'%s%s'%(arabconst.LAM, arabconst.ALEF), text)

#--------------------------------------
Project: picoCTF    Author: picoCTF    | Project source | File source
def sanitize_name(name):
    """
    Sanitize a given name such that it conforms to unix policy.

    Args:
        name: the name to sanitize.

    Returns:
        The sanitized form of name.
    """

    if len(name) == 0:
        raise Exception("Can not sanitize an empty field.")

    sanitized_name = re.sub(r"[^a-z0-9\+-]", "-", name.lower())

    if sanitized_name[0] in string.digits:
        sanitized_name = "p" + sanitized_name

    return sanitized_name
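Expected behaviour, worked by hand (assuming the module's re and string imports); note that trailing punctuation becomes a trailing dash, since the function never trims:

print(sanitize_name("My CTF Problem!"))  # -> 'my-ctf-problem-'
print(sanitize_name("9lives"))           # -> 'p9lives'
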

#I will never understand why the shutil functions act
#the way they do...
Project: OpenCouture-Dev    Author: 9-9-0    | Project source | File source
def addToCart(self):
        print '\nADD TO CART -----------------'
        session_get = self.user_session.get(self.URL_product_url, headers=self.get_headers)
        #print session_get.content
        soup = BeautifulSoup(session_get.content, 'lxml')

        results = soup.find_all('select', class_='size-select')
        #print results

        for item in results[0].select('option'):
            re_result = re.sub(self.sub_pattern, '', item.string)
            #print re_result
            matchObj = re.search(r"^%s+$" % self.user_size, re_result)
            if matchObj:
                self.post_data_addToCart['pid'] = item['value']
                self.post_data_addToCart['masterPID'] = item['value'].partition("_")[0]
                print self.post_data_addToCart
                break

        session_post = self.user_session.post(url=self.URL_cart_post_url, headers=self.post_headers, data=self.post_data_addToCart)
        print 'Add To Cart Status: ' + str(session_post.status_code)
Project: seq2seq    Author: google    | Project source | File source
def process_story(text):
  """Processed a story text into an (article, summary) tuple.
  """
  # Split by highlights
  elements = text.split("@highlight")
  elements = [_.strip() for _ in elements]

  story_text = elements[0]
  highlights = elements[1:]

  # Join all highlights into a single blob
  highlights_joined = "; ".join(highlights)
  highlights_joined = re.sub(r"\s+", " ", highlights_joined)
  highlights_joined = highlights_joined.strip()

  # Remove newlines from story
  # story_text = story_text.replace("\n", " ")
  story_text = re.sub(r"\s+", " ", story_text)
  story_text = story_text.strip()

  return story_text, highlights_joined
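Usage on a hypothetical CNN/DailyMail-style blob:

text = "The story text.\n\n@highlight\n\nFirst point\n\n@highlight\n\nSecond point"
print(process_story(text))
# -> ('The story text.', 'First point; Second point')
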
Project: database_assetstore    Author: OpenGeoscience    | Project source | File source
def _get_column_info(self, name, format_type, *args, **kwargs):
    """
    When the PGDialect or its subclasses get column information, if the type
    is unknown and certain conditions are met, create a new type dynamically.
    This wraps the original function (see sqlalchemy's
    dialects/postgresql/base.py file).
    """
    attype = re.sub(r'\(.*\)', '', format_type)
    attype = re.sub(r'\[\]', '', attype)
    info = gci(self, name, format_type, *args, **kwargs)
    if (info['type'] == sqlalchemy.sql.sqltypes.NULLTYPE and
            attype.lower() not in dialect.base.ischema_names and
            attype not in KnownTypes):
        newtype = type(str(attype), (DynamicType,), {'name': str(attype)})
        newtype.__visit_name__ = attype
        dialect.base.ischema_names[attype.lower()] = newtype
        KnownTypes[attype] = newtype
        info = gci(self, name, format_type, *args, **kwargs)
    return info
Project: cellranger    Author: 10XGenomics    | Project source | File source
def format_name(display_name, prefix, prefixes, prefix_format_func=None):
    if prefix is not None and prefix_format_func is not None:
        prefix = prefix_format_func(prefix)

    # Default multi -> '' if no format func given
    if prefix_format_func is None and prefix == cr_constants.MULTI_REFS_PREFIX:
        prefix = ''

    if len(prefixes) > 1 or '%s' in display_name:
        display_name = add_prefix(prefix, display_name)

    # Replace underscores w/ spaces
    display_name = display_name.replace('_', ' ')

    # Collapse whitespace
    display_name = re.sub(r'\s+', ' ', display_name)

    return display_name
Project: cellranger    Author: 10XGenomics    | Project source | File source
def format_description(description, prefix, prefixes, prefix_format_func=None):
    if prefix is not None and prefix_format_func is not None:
        prefix = prefix_format_func(prefix)

    # Default multi -> '' if no format func given
    if prefix_format_func is None and prefix == cr_constants.MULTI_REFS_PREFIX:
        prefix = ''

    if '%s' in description:
        # Escape stray percents
        description = re.sub('%([^s])', '%%\\1', description)

        # Only add the prefix if there are multiple possibilities
        s = str(prefix) if len(prefixes) > 1 and prefix else ''
        description = description % s

    # Collapse whitespace
    description = re.sub(r'\s+', ' ', description)

    return description
Project: data_pipeline    Author: Yelp    | Project source | File source
def __init__(self, additional_compose_file=None, additional_services=None):
        # To resolve docker client server version mismatch issue.
        os.environ["COMPOSE_API_VERSION"] = "auto"
        dir_name = os.path.split(os.getcwd())[-1]
        self.project = "{}{}".format(
            re.sub(r'[^a-z0-9]', '', dir_name.lower()),
            getpass.getuser()
        )
        self.additional_compose_file = additional_compose_file

        self.services = ["zookeeper", "schematizer", "kafka"]

        if additional_services is not None:
            self.services.extend(additional_services)

        # This variable is meant to capture the running/not-running state of
        # the dependent testing containers when tests start running.  The idea
        # is, we'll only start and stop containers if they aren't already
        # running.  If they are running, we'll just use the ones that exist.
        # It takes a while to start all the containers, so when running lots of
        # tests, it's best to start them out-of-band and leave them up for the
        # duration of the session.
        self.containers_already_running = self._are_containers_already_running()
Project: BitBot    Author: crack00r    | Project source | File source
def infer_id(self):
        representation = self.__repr__(ignore_id=True)

        # Clean the representation
        representation = representation\
            .replace(':bytes ', ':string ')\
            .replace('?bytes ', '?string ')\
            .replace('<', ' ').replace('>', '')\
            .replace('{', '').replace('}', '')

        representation = re.sub(
            r' \w+:flags\.\d+\?true',
            r'',
            representation
        )
        return crc32(representation.encode('ascii'))
Project: jobs    Author: josiahcarlson    | Project source | File source
def _check_inputs_and_outputs(fcn):
    @functools.wraps(fcn)
    def call(conn, inputs, outputs, identifier, *a, **kw):
        assert isinstance(inputs, (list, tuple, set)), inputs
        assert isinstance(outputs, (list, tuple, set)), outputs
        assert '' not in inputs, inputs
        assert '' not in outputs, outputs
        # this is for actually locking inputs/outputs
        inputs, outputs = list(map(str, inputs)), list(map(str, outputs))
        locks = inputs + [''] + outputs

        if kw.pop('history', None):
            igraph = [EDGE_RE.sub('*', inp) for inp in inputs]
            ograph = [EDGE_RE.sub('*', out) for out in outputs]
            graph_id = EDGE_RE.sub('*', str(identifier))
            graph = igraph + [''] + ograph + ['', graph_id]
            if all(x.startswith('test.') for x in igraph + ograph):
                graph = ['', '']
        else:
            graph = ['', '']

        return fcn(conn, locks, graph, str(identifier), *a, **kw)
    return call
Project: ml    Author: hohoins    | Project source | File source
def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measure the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  # tf.histogram_summary(tensor_name + '/activations', x)
  tf.summary.histogram(tensor_name + '/activations', x)
  # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
  tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
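To see just the renaming step, here is a standalone sketch; TOWER_NAME = 'tower' is an assumption based on the usual value in the TensorFlow CIFAR-10 tutorial this code derives from:

import re

TOWER_NAME = 'tower'  # assumed; defined elsewhere in the original module
op_name = 'tower_0/conv1/weights'
print(re.sub('%s_[0-9]*/' % TOWER_NAME, '', op_name))  # -> 'conv1/weights'
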
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def glob_to_re(pattern):
    """Translate a shell-like glob pattern to a regular expression.

    Return a string containing the regex.  Differs from
    'fnmatch.translate()' in that '*' does not match "special characters"
    (which are platform-specific).
    """
    pattern_re = fnmatch.translate(pattern)

    # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
    # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
    # and by extension they shouldn't match such "special characters" under
    # any OS.  So change all non-escaped dots in the RE to match any
    # character except the special characters.
    # XXX currently the "special characters" are just slash -- i.e. this is
    # Unix-only.
    pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', pattern_re)

    return pattern_re
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def subst_vars (s, local_vars):
    """Perform shell/Perl-style variable substitution on 'string'.  Every
    occurrence of '$' followed by a name is considered a variable, and
    variable is substituted by the value found in the 'local_vars'
    dictionary, or in 'os.environ' if it's not in 'local_vars'.
    'os.environ' is first checked/augmented to guarantee that it contains
    certain values: see 'check_environ()'.  Raise ValueError for any
    variables not found in either 'local_vars' or 'os.environ'.
    """
    check_environ()
    def _subst (match, local_vars=local_vars):
        var_name = match.group(1)
        if var_name in local_vars:
            return str(local_vars[var_name])
        else:
            return os.environ[var_name]

    try:
        return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
    except KeyError, var:
        raise ValueError, "invalid variable '$%s'" % var

# subst_vars ()
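The same callable-replacement idea works standalone; a minimal sketch with hypothetical variables:

import re

def subst_vars_demo(s, local_vars):
    # Resolve each '$name' from the dict, mirroring subst_vars() above
    def _subst(match):
        return str(local_vars[match.group(1)])
    return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)

print(subst_vars_demo('$prefix/lib/$name', {'prefix': '/usr', 'name': 'png'}))
# -> '/usr/lib/png'
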
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def formataddr(pair):
    """The inverse of parseaddr(), this takes a 2-tuple of the form
    (realname, email_address) and returns the string value suitable
    for an RFC 2822 From, To or Cc header.

    If the first element of pair is false, then the second element is
    returned unmodified.
    """
    name, address = pair
    if name:
        quotes = ''
        if specialsre.search(name):
            quotes = '"'
        name = escapesre.sub(r'\\\g<0>', name)
        return '%s%s%s <%s>' % (quotes, name, quotes, address)
    return address
Project: kinect-2-libras    Author: inessadl    | Project source | File source
def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'

    """
    headers = []
    for pairs in lists:
        attr = []
        for k, v in pairs:
            if v is not None:
                if not re.search(r"^\w+$", v):
                    v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v)  # escape " and \
                    v = '"%s"' % v
                k = "%s=%s" % (k, v)
            attr.append(k)
        if attr: headers.append("; ".join(attr))
    return ", ".join(headers)
Project: Instagram    Author: Fastcampus-WPS-5th    | Project source | File source
def make_html_content_and_add_tags(self):
        # Compile a pattern that matches hashtags in the content
        p = re.compile(r'(#\w+)')
        # Extract every hashtag name with findall
        tag_name_list = re.findall(p, self.content)
        # Work on a copy of the original content (of this Comment)
        ori_content = self.content
        # Process each hashtag that was found
        for tag_name in tag_name_list:
            # Get or create the Tag object; the created flag is discarded as _
            tag, _ = Tag.objects.get_or_create(name=tag_name.replace('#', ''))
            # Build the link that replaces the raw hashtag in the content
            change_tag = '<a href="{url}" class="hash-tag">{tag_name}</a>'.format(
                # url=reverse('post:hashtag_post_list', args=[tag_name.replace('#', '')]),
                url=reverse('post:hashtag_post_list',
                            kwargs={'tag_name': tag_name.replace('#', '')}),
                tag_name=tag_name
            )
            ori_content = re.sub(r'{}(?![<\w])'.format(tag_name), change_tag, ori_content, count=1)
            # Add the Tag to this object's tags if it is not already linked
            if not self.tags.filter(pk=tag.pk).exists():
                self.tags.add(tag)
        # Store the converted content in html_content and save
        self.html_content = ori_content
        super().save(update_fields=['html_content'])
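The negative lookahead in that substitution keeps one tag from eating the prefix of a longer one; a standalone sketch with a hypothetical tag and URL:

import re

content = 'See #django and #djangocon'
link = '<a href="/tag/django/" class="hash-tag">#django</a>'
print(re.sub(r'{}(?![<\w])'.format('#django'), link, content, count=1))
# '#djangocon' is untouched: (?![<\w]) rejects the 'c' that follows '#django'
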
Project: ln2sql    Author: FerreroJeremy    | Project source | File source
def _clean_output(s):
    s = s.split("SELECT")[1]  # remove table schema
    s = re.sub("\\033.*?m", "", s)  # remove color codes
    s = s.replace('\n', ' ')  # remove '\n'
    s = s.split(';')[0]  # drop everything after the first ';'
    s = "SELECT" + s + ';'  # put back lost SELECT and ';'
    return s
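The color-stripping line relies on '\\033' being the octal regex escape for ESC; a quick check with a hypothetical colored query:

import re

colored = '\033[1;32mSELECT\033[0m * FROM users;'
print(re.sub("\\033.*?m", "", colored))  # -> 'SELECT * FROM users;'
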
Project: django-openapi-gen    Author: Ecognize    | Project source | File source
def make_regex(self, path, named = False):
        regex = re.sub(SWAGGER_PARAMS_REGEX, DJANGO_PARAMS_STRING, path) if named else path
        regex = re.sub(URL_SLASHES_REGEX, DJANGO_URL_SUBSTRING, regex)

        return regex
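SWAGGER_PARAMS_REGEX and its companions are defined elsewhere in the project; a sketch with assumed values shows the intended rewrite of swagger-style path parameters into Django named groups:

import re

# Assumed values; the real constants live elsewhere in this project
SWAGGER_PARAMS_REGEX = r'\{(\w+)\}'      # matches a swagger path parameter like {petId}
DJANGO_PARAMS_STRING = r'(?P<\1>[^/]+)'  # rewrite it as a Django named group

print(re.sub(SWAGGER_PARAMS_REGEX, DJANGO_PARAMS_STRING, '/pets/{petId}'))
# -> '/pets/(?P<petId>[^/]+)'
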

    #: create handler ready for urlization
Project: linux-soft-exploit-suggester    Author: belane    | Project source | File source
def cleanName(soft_name):
    """ Clean package name from common strings """
    for badword in badpackages:
        soft_name = re.sub(r'-' + badword, '', soft_name)
    return soft_name
Project: socialhome    Author: jaywink    | Project source | File source
def fetch_oembed_preview(content, urls):
    """Fetch first oembed content for a list of urls."""
    for url in urls:
        # See first if recently cached already
        if OEmbedCache.objects.filter(url=url, modified__gte=now()-datetime.timedelta(days=7)).exists():
            oembed = OEmbedCache.objects.get(url=url)
            Content.objects.filter(id=content.id).update(oembed=oembed)
            return oembed
        # Fetch oembed
        options = {}
        if url.startswith("https://twitter.com/"):
            # This probably has little effect since we fetch these on the backend...
            # But, DNT is always good to communicate if possible :)
            options = {"dnt": "true"}
        try:
            oembed = PyEmbed(discoverer=OEmbedDiscoverer()).embed(url, **options)
        except (PyEmbedError, PyEmbedDiscoveryError, PyEmbedConsumerError, ValueError):
            continue
        if not oembed:
            continue
        # Ensure width is 100% not fixed
        oembed = re.sub(r'width="[0-9]*"', 'width="100%"', oembed)
        oembed = re.sub(r'height="[0-9]*"', "", oembed)
        try:
            with transaction.atomic():
                oembed = OEmbedCache.objects.create(url=url, oembed=oembed)
        except IntegrityError:
            # Some other process got ahead of us
            oembed = OEmbedCache.objects.get(url=url)
            Content.objects.filter(id=content.id).update(oembed=oembed)
            return oembed
        Content.objects.filter(id=content.id).update(oembed=oembed)
        return oembed
    return False
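The two attribute rewrites are easy to check on a hypothetical embed snippet:

import re

iframe = '<iframe width="480" height="270" src="https://example.com/embed"></iframe>'
iframe = re.sub(r'width="[0-9]*"', 'width="100%"', iframe)  # force fluid width
iframe = re.sub(r'height="[0-9]*"', '', iframe)             # drop the fixed height
print(iframe)  # -> '<iframe width="100%"  src="https://example.com/embed"></iframe>'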