Python mechanize module: Browser() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use mechanize.Browser().
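
As a quick orientation, here is a minimal, self-contained sketch of the setup most of the snippets below repeat: create a Browser, attach a cookie jar, relax robots.txt handling, and present a browser-like User-Agent. It follows the Python 2 style of the examples, and the URL is only a placeholder.

import mechanize
import cookielib

br = mechanize.Browser()
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)           # keep cookies between requests
br.set_handle_robots(False)    # do not fetch or obey robots.txt
br.set_handle_redirect(True)   # follow HTTP redirects
br.addheaders = [('User-agent', 'Mozilla/5.0')]  # browser-like User-Agent

response = br.open('http://example.com')
html = response.read()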

Project: WebGmxChecker    Author: SUP3RIA
def login(i,j,ur):
    ua = UserAgent()
    cookiejar =cookielib.LWPCookieJar()
    br = mechanize.Browser()
    br.set_cookiejar(cookiejar)
    #br.set_debug_http(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(False)
    br.addheaders = [('User-Agent', ua.random), ('Accept', '*/*')]
    url = "http://www."+ur+"/"
    br.open(url)
    br.select_form(nr = 1)
    br.form['username'] = i
    br.form['password'] = j
    br.submit()
    # treat a redirect URL of exactly 72 characters as a successful login
    if len(br.geturl()) == 72:
        return True
    else:
        return False
Project: SPSE    Author: ioef
def urlOpen(url,username, password, submitted):
    global cookiejar
    #Check if the function is called for the 1st time;
    #if it is not, reuse the previously created cookie jar
    if submitted == True:
        br = mechanize.Browser()
        br.set_handle_robots(False)
        br.set_cookiejar(cookiejar) 
        br.open(url)

        #select the 1st form
        br.select_form(nr=0)
        #fill in the credentials
        br.form['username'] = username
        br.form['password'] = password
        br.submit()
    else:
        br2 = mechanize.Browser()
        br2.set_handle_robots(False)
        br2.set_cookiejar(cookiejar)
        br2.open(url)
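
For reference, a minimal sketch of the cookie-sharing pattern this function aims at: each call may build a fresh Browser, but all calls reuse one module-level cookie jar, so the session cookies set at login survive later requests (the function name here is hypothetical).

import mechanize
import cookielib

cookiejar = cookielib.LWPCookieJar()

def open_with_session(url):
    # A new Browser per call, but the shared cookiejar carries the session.
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.set_cookiejar(cookiejar)
    return br.open(url)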
Project: llk    Author: Tycx2ry
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        br = mechanize.Browser()
        # set the user-agent header
        br.addheaders = [('User-agent', self._global_options['user-agent'])]
        # set debug options
        if self._global_options['verbosity'] >= 2:
            br.set_debug_http(True)
            br.set_debug_redirects(True)
            br.set_debug_responses(True)
        # set proxy
        if self._global_options['proxy']:
            br.set_proxies({'http': self._global_options['proxy'], 'https': self._global_options['proxy']})
        # additional settings
        br.set_handle_robots(False)
        # set timeout
        socket.setdefaulttimeout(self._global_options['timeout'])
        return br
Project: WebScraping    Author: liinnux
def mechanize_edit():
    """Use mechanize to increment population
    """
    # login
    br = mechanize.Browser()
    br.open(login.LOGIN_URL)
    br.select_form(nr=0)
    print br.form
    br['email'] = login.LOGIN_EMAIL
    br['password'] = login.LOGIN_PASSWORD
    response = br.submit()

    # edit country
    br.open(COUNTRY_URL)
    br.select_form(nr=0)
    print 'Population before:', br['population']
    br['population'] = str(int(br['population']) + 1)
    br.submit()

    # check population increased
    br.open(COUNTRY_URL)
    br.select_form(nr=0)
    print 'Population after:', br['population']
Project: Crawler    Author: xinhaojing
def __init__(self, headers={}, debug=True, p=''):
        self.timeout = 10
        self.br = mechanize.Browser()  # the mechanize browser instance
        self.cj = cookielib.LWPCookieJar()
        self.br.set_cookiejar(self.cj)  # attach the cookie jar
        self.br.set_handle_equiv(True)  # honour HTTP-EQUIV headers
        self.br.set_handle_gzip(True)  # handle gzip-compressed responses
        self.br.set_handle_redirect(True)  # follow redirects
        self.br.set_handle_referer(True)  # send the Referer header
        self.br.set_handle_robots(False)  # ignore robots.txt
        self.br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
        self.br.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')]
        self.debug = debug
        # in debug mode, print HTTP traffic, redirects and responses
        if self.debug:
            self.br.set_debug_http(True)
            self.br.set_debug_redirects(True)
            self.br.set_debug_responses(True)
        # extra headers
        for key in headers.keys():
            self.br.addheaders += [(key, headers[key])]
        # proxy
        if p and p not in ('None', 'NULL'):
            self.br.set_proxies({'http': p})
Project: recon-ng    Author: Hehe-Zhc
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        br = mechanize.Browser()
        # set the user-agent header
        br.addheaders = [('User-agent', self._global_options['user-agent'])]
        # set debug options
        if self._global_options['debug']:
            br.set_debug_http(True)
            br.set_debug_redirects(True)
            br.set_debug_responses(True)
        # set proxy
        if self._global_options['proxy']:
            br.set_proxies({'http': self._global_options['proxy'], 'https': self._global_options['proxy']})
        # additional settings
        br.set_handle_robots(False)
        # set timeout
        socket.setdefaulttimeout(self._global_options['timeout'])
        return br
Project: sopel-modules    Author: normcyr
def login(username, password):
    br = mechanize.Browser()
    login_url = "http://cyclebabac.com/wp-login.php"

    # Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)

    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages? Uncomment this
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # Perform the actual login
    br.open(login_url)
    br.select_form(nr=0)
    br.form['log'] = str(username)
    br.form['pwd'] = str(password)
    br.submit()

    return br
Project: recon-ng    Author: captainhooligan
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        br = mechanize.Browser()
        # set the user-agent header
        br.addheaders = [('User-agent', self._global_options['user-agent'])]
        # set debug options
        if self._global_options['verbosity'] >= 2:
            br.set_debug_http(True)
            br.set_debug_redirects(True)
            br.set_debug_responses(True)
        # set proxy
        if self._global_options['proxy']:
            br.set_proxies({'http': self._global_options['proxy'], 'https': self._global_options['proxy']})
        # additional settings
        br.set_handle_robots(False)
        # set timeout
        socket.setdefaulttimeout(self._global_options['timeout'])
        return br
Project: mechanize    Author: python-mechanize
def test_encoding(self):
        import mechanize
        import mimetools
        import urllib
        from StringIO import StringIO
        # always take first encoding, since that's the one from the real HTTP
        # headers, rather than from HTTP-EQUIV
        b = mechanize.Browser()
        for s, ct in [
            ("", mechanize._html.DEFAULT_ENCODING),
            ("Foo: Bar\r\n\r\n", mechanize._html.DEFAULT_ENCODING),
            ("Content-Type: text/html; charset=UTF-8\r\n\r\n", "UTF-8"),
            ("Content-Type: text/html; charset=UTF-8\r\n"
             "Content-Type: text/html; charset=KOI8-R\r\n\r\n", "UTF-8"),
        ]:
            msg = mimetools.Message(StringIO(s))
            r = urllib.addinfourl(StringIO(""), msg, "http://www.example.com/")
            b.set_response(r)
            self.assertEqual(b.encoding(), ct)
Project: mechanize    Author: python-mechanize
def test_clone_browser(self):
        from mechanize import Browser
        br = Browser()
        br.set_handle_refresh(True, max_time=237, honor_time=True)
        br.set_handle_robots(False)
        cbr = copy.copy(br)
        for h, ch in zip(br.handlers, cbr.handlers):
            self.assertIsNot(h, ch)
            self.assertIs(ch.parent, cbr)
            self.assertIs(h.__class__, ch.__class__)
        self.assertEqual(set(br._ua_handlers), set(cbr._ua_handlers))
        self.assertIs(br._ua_handlers['_cookies'].cookiejar,
                      cbr._ua_handlers['_cookies'].cookiejar)
        self.assertIsNot(br.addheaders, cbr.addheaders)
        self.assertEqual(br.addheaders, cbr.addheaders)
        h = cbr._ua_handlers['_refresh']
        self.assertEqual((h.honor_time, h.max_time), (True, 237))
Project: mechanize    Author: python-mechanize
def performance_plot():
    def retrieve(url, filename):
        br = mechanize.Browser()
        br.retrieve(url, filename)

#     import urllib2
#     def retrieve(url, filename):
#         urllib2.urlopen(url).read()

#     from mechanize import _useragent
#     ua = _useragent.UserAgent()
#     ua.set_seekable_responses(True)
#     ua.set_handle_equiv(False)
#     def retrieve(url, filename):
#         ua.retrieve(url, filename)

    rows = []
    for size in power_2_range(256 * KB, 256 * MB):
        temp_maker = TempDirMaker()
        try:
            elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
        finally:
            temp_maker.tear_down()
        rows.append((size // float(MB), elapsed))
    show_plot(rows)
Project: WintersWrath    Author: Elfsong
def wiglePrint(username, password, netid):
    browser = mechanize.Browser()
    browser.open('https://wigle.net/')
    reqData = urllib.urlencode({'credential_0': username, 'credential_1': password})
    browser.open('https://wigle.net/gps/gps/main/confirmquery', reqData)

    params = {}
    params['netid'] = netid
    reqParams = urllib.urlencode(params)

    resp = browser.open('https://api.wigle.net/api/v2/network/search?first=0&netid=' + netid).read()

    data = json.loads(resp)

    mapLat = 'N/A'
    mapLon = 'N/A'
    rLat = data['results'][0].get('trilat', None)
    if rLat:
        mapLat = rLat
    rLon = data['results'][0].get('trilong', None)
    if rLon:
        mapLon = rLon
    print '[-] Lat: ' + str(mapLat) + ', Lon: ' + str(mapLon)
Project: AmpliSpy    Author: NullArray
def mech_ops():
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.addheaders = [('user-agent', select_UA()), ('accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]

    try:
        response = br.open("http://www.public-dns.info/nameservers.txt")
    except Exception as e:
        print "\n[" + t.red("!") + "]Critical, could not open public-dns.info"
        print "[" + t.green("+") + "]The following status code was received:"
        print "\n %s" % (e)
        sys.exit(1)

    result = response.read()
    proc = result.rstrip().split('\n')

    return proc


# If args, read list, else fetch
Project: pydata_webscraping    Author: jmortega
def create_browser():
    br = mechanize.Browser()           # Create basic browser
    cj = cookielib.LWPCookieJar()      # Create cookiejar to handle cookies
    br.set_cookiejar(cj)               # Set cookie jar for our browser
    br.set_handle_equiv(True)          # Allow opening of certain files
    br.set_handle_gzip(True)           # Allow handling of zip files
    br.set_handle_redirect(True)       # Automatically handle auto-redirects
    br.set_handle_referer(True)
    br.set_handle_robots(False)        # ignore anti-robots.txt

    # Necessary headers to simulate an actual browser
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'),
                   ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
                   ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
                   ('Accept-Encoding', 'gzip,deflate,sdch'),
                   ('Accept-Language', 'en-US,en;q=0.8,fr;q=0.6'),
                   ('Connection', 'keep-alive')
                  ]
    return br
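
A short usage sketch for the helper above (the URL is only a placeholder):

br = create_browser()
html = br.open('http://example.com').read()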
Project: pydata_webscraping    Author: jmortega
def translate(home_language,target_language,text):
    text = text.replace(" ","%20");

    get_url = "https://translate.google.com/?sl="+home_language+"&tl="+target_language+"&text="+text
    #print get_url
    browser = mechanize.Browser()
    # Disable loading robots.txt
    browser.set_handle_robots(False)

    browser.addheaders = [('User-agent',
                     'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]

    translated_text = browser.open(get_url)
    translated_text = translated_text.read().decode('UTF-8')

    soup = BeautifulSoup(translated_text, "lxml")
    div_content = soup.find('div', {'id' : 'gt-res-content'})
    converted_text = div_content.find('span', {'id':'result_box'}).text

    return converted_text
Project: brutat    Author: el2t
def __init__(self, site, passp, proxyp, timeout, passlist, proxylist, proxylen, username, time, br=None):

        self.site = site
        self.passp = passp
        self.proxyp = proxyp
        self.proxylist = proxylist
        self.proxylen = proxylen
        self.passlist = passlist
        self.username = username
        self.time = time
        self.br = br or mechanize.Browser()
        self.timeout = timeout



    # configure mechanize Browser
Project: various_stuff    Author: oujezdsky
def getInfo(ipaddr, userAgent, proxz, hostname):
    WEBFORM_NAME = 'search'
    browser = mechanize.Browser()
    browser.set_handle_robots(False)
    browser.set_handle_equiv(True)
    browser.set_handle_referer(True)
    browser.set_handle_redirect(True)
    browser.addheaders = userAgent
    # browser.set_proxies(proxz)
    cookie_jar = cookielib.LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    page = browser.open('https://apps.db.ripe.net/search/query.html')
    for form in browser.forms():
        if form.name == WEBFORM_NAME:
            browser.select_form(WEBFORM_NAME)
            browser.form['search:queryString'] = ipaddr
            browser.form['search:sources'] = ['GRS']
            submission = browser.submit().read()
            parsed_submission = BeautifulSoup(submission, 'html.parser')
            print ipaddr, '/',hostname
            for mainIndex in parsed_submission.find_all('ul', {'class': 'attrblock'}):
                for i, li in enumerate(mainIndex.findAll('li')):
                    if i in range(0, 2):
                        print '[+] ', li.text
            print '\n ########## \n'
Project: OpenCouture-Dev    Author: 9-9-0
def __init__(self):
        self.br = mechanize.Browser()
        #self.cj = cookielib.LWPCookieJar()
        self.cj = cookielib.MozillaCookieJar()

        self.br.set_cookiejar(self.cj)
        self.br.set_handle_equiv(True)
        self.br.set_handle_referer(True)
        self.br.set_handle_robots(False)
        self.br.addheaders = [('User-agent', 'Firefox')]

        self.item_url = 'http://shop.bdgastore.com/collections/footwear/products/y-3-pureboost-zg'

        # Create variables for user credentials and a function to import them
Project: slack-shogi    Author: setokinto
def input_emojis(id_, password, team_id, two_factor, force_update=False):
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.open("https://{}.slack.com/".format(team_id))
    br.select_form(nr=0)
    br["email"] = id_
    br["password"] = password
    br.submit()
    if two_factor:
        br.select_form(nr=0)
        br["2fa_code"] = two_factor
        br.submit()

    count = 0
    for file_name in emojis:
        emoji_name = emojis[file_name]
        response = br.open(
            "https://{}.slack.com/customize/emoji".format(team_id))
        if response.read().find(emoji_name) >= 0 and not force_update:
            # Simple resume. Does it work?
            # FIXME: Use beautiful soup and search it using dom
            print("{}/{} skipped(already exists for the name '{}')".format(count,
                                                                           len(emojis), emoji_name))
            continue
        br.select_form(nr=0)
        br["name"] = emoji_prefix + emoji_name
        br.form.add_file(open(file_name), "image/png", file_name, name="img")
        br.submit()
        count += 1
        print("{}/{} completed".format(count, len(emojis)))
        time.sleep(1)
Project: sarafu    Author: pesaply
def __init__(self, account, pin, browser=None):
        """
        On occasions where you'll make a number of requests
        to the server, you will want to store the mechanize browser
        object in some cache so it can be reused.
        This has the advantage of reducing the number of requests
        necessary to complete given tasks.
        The browser object can simply be created this way:
        > browser = mechanize.Browser()
        """
        self.account = account
        self.pin = pin
        self.br = browser or mechanize.Browser()
        self.br.set_handle_robots(False)
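
A sketch of the caching pattern the docstring describes, assuming a hypothetical Client class that wraps this __init__ and a module-level cache keyed by account:

import mechanize

_browser_cache = {}

def get_client(account, pin):
    # Reuse one mechanize.Browser per account so cookies and session
    # state survive across calls instead of re-authenticating each time.
    if account not in _browser_cache:
        _browser_cache[account] = mechanize.Browser()
    return Client(account, pin, browser=_browser_cache[account])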
Project: pi_romulus    Author: ArthurMoore85
def __init__(self, rom=None, parent=None):
        """
        Scrapes a website
        :param search_query: User search query
        """
        self.parent = parent
        self.url = 'http://www.emuparadise.me'
        self.search_q = rom
        self.browser = mechanize.Browser()
Project: nthu-ee-progreport-autogen    Author: HW-Lee
def __init__(self, usrvar):
        if "vpn-username" in usrvar.keys() and "vpn-userpwd" in usrvar.keys():
            self.vpn = {"usr": usrvar["vpn-username"], "pwd": usrvar["vpn-userpwd"]}
        else:
            self.vpn = None
        self.rooturl = None
        self.usrname = usrvar["username"]
        self.usrpwd = usrvar["userpwd"]
        self.stuname = usrvar["studentname"]
        self.br = mechanize.Browser()
        self.br.set_handle_robots(False)
Project: PythonP2PBotnet    Author: jhoward321
def attack(address, htmlObject):
    br = Browser()
    br.open(address)

    br.click(htmlObject)
Project: DistributeCrawler    Author: SmallHedgehog
def __init__(self, cookie_filename=None, user_agent=None, timeout=None, **kwargs):
        try:
            import mechanize
        except ImportError:
            raise DependencyNotInstalledError('mechanize')

        if user_agent is None:
            user_agent = 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'

        self.browser = mechanize.Browser()
        self.cj = cookielib.LWPCookieJar()
        if cookie_filename is not None:
            self.cj.load(cookie_filename)
        self.browser.set_cookiejar(self.cj)
        self.browser.set_handle_equiv(True)
        self.browser.set_handle_gzip(True)
        self.browser.set_handle_redirect(True)
        self.browser.set_handle_referer(True)
        self.browser.set_handle_robots(False)
        self.browser.addheaders = [
            ('User-agent', user_agent)
        ]

        if timeout is None:
            # self._default_timout = mechanize._sockettimeout._GLOBAL_DEFAULT_TIMEOUT
            self._default_timout = 5
        else:
            self._default_timout = timeout
Project: Trity    Author: toxic-ig
def anon():
    br = mechanize.Browser()

    to = raw_input(R + "Enter the recipient address: ")
    subject = raw_input("Enter the subject: ")
    print "Message: "
    message = raw_input(">")

    #proxy = "http://127.0.0.1:8080"

    url = "http://anonymouse.org/anonemail.html"
    headers = "Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)"
    br.addheaders = [('User-agent', headers)]
    br.open(url)
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_debug_http(False)
    br.set_debug_redirects(False)
    #br.set_proxies({"http": proxy})

    br.select_form(nr=0)

    br.form['to'] = to
    br.form['subject'] = subject
    br.form['text'] = message

    result = br.submit()

    response = br.response().read()


    if "The e-mail has been sent anonymously!" in response:
        print "The email has been sent successfully!! \n The recipient will get it in up to 12 hours!"
    else:
        print "Failed to send email!"
Project: studentLogin_API_Flask    Author: PiyushGoyal443
def __init__(self):

        self.br = mechanize.Browser()
        self.br.set_handle_robots(False)
        self.br.set_handle_equiv(True)
        self.br.set_handle_gzip(True)
        self.br.set_handle_redirect(True)
        self.br.set_handle_referer(True)

    #for getting academic spotlight
Project: Project-ET    Author: p0rt22
def anonymousEmail(to, subject, message):
        br = mechanize.Browser()
        url = 'http://anonymouse.org/anonemail.html'
        headers = 'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)'
        br.addheaders = [('User-agent', headers)]
        br.open(url)
        br.set_handle_equiv(True)
        br.set_handle_gzip(True)
        br.set_handle_redirect(True)
        br.set_handle_referer(True)
        br.set_handle_robots(False)
        br.set_debug_http(False)
        br.set_debug_redirects(False)

        br.select_form(nr=0)

        br.form['to'] = to
        br.form['subject'] = subject
        br.form['text'] = message

        result = br.submit()
        response = br.response().read()
        # fills all the forms on the website

        if 'The e-mail has been sent anonymously!' in response:
            print 'Success, the email will be sent shortly'
            pause()
            main()

        else:
            print 'Email failed to send'
            pause()
            main()
        # checks response from website
Project: cloudomate    Author: Jaapp-
def _create_browser():
        br = Browser()
        br.set_handle_robots(False)
        br.addheaders = [('User-agent', random.choice(user_agents))]
        return br
Project: cloudomate    Author: Jaapp-
def _get_network_cost(speed):
    br = Browser()
    br.addheaders = [('User-Agent', 'Firefox')]
    page = br.open('https://bitcoinfees.21.co/api/v1/fees/recommended')
    rates = json.loads(page.read())
    satoshirate = float(rates[speed])
    return satoshirate
Project: yogame    Author: tivisse
def _prepare_browser(self):
        self.br = mechanize.Browser()
        self.br.set_handle_equiv(True)
        self.br.set_handle_redirect(True)
        self.br.set_handle_referer(True)
        self.br.set_handle_robots(False)
        self.br.addheaders = self.HEADERS
Project: pelisalacarta-ce    Author: pelisalacarta-ce
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with Bing searches
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    #br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    #br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    print response
    if "img,divreturn" in response:
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/"+url)
        print "prooooxy"
        response = r.read()

    return response
Project: pelisalacarta-ce    Author: pelisalacarta-ce
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with Bing searches
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    #br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    print response
    if "img,divreturn" in response:
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/"+url)
        print "prooooxy"
        response = r.read()

    return response
Project: pelisalacarta-ce    Author: pelisalacarta-ce
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with Bing searches
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    #br.addheaders =[('Cookie','SRCHD=D=4210979&AF=NOFORM; domain=.bing.com; expires=Wednesday, 09-Nov-06 23:12:40 GMT; MUIDB=36F71C46589F6EAD0BE714175C9F68FC; domain=www.bing.com;    expires=15 de enero de 2018 08:43:26 GMT+1')]

    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/"+url)
    response = r.read()
    if not ".ftrH,.ftrHd,.ftrD>" in response:
        print "proooxy"
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/"+url)
        response = r.read()
    return response
Project: pelisalacarta-ce    Author: pelisalacarta-ce
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with Bing searches
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    #br.addheaders =[('Cookie','SRCHD=D=4210979&AF=NOFORM; domain=.bing.com; expires=Wednesday, 09-Nov-06 23:12:40 GMT; MUIDB=36F71C46589F6EAD0BE714175C9F68FC; domain=www.bing.com;    expires=15 de enero de 2018 08:43:26 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    #if "z{a:1}" in response:
    if not ".ftrH,.ftrHd,.ftrD>" in response:
        print "proooxyy"
        r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/"+url)
        response = r.read()
    return response
    ###def proxy(url):
    '''from lib import requests
    proxies = {"http": "http://anonymouse.org/cgi-bin/anon-www.cgi/"+url}
    print "zorro"
    print proxies
    rsp = requests.get(url, proxies=proxies,stream=True)
    print rsp.raw._fp.fp._sock.getpeername()
    print rsp.content
    response = rsp.content
    return response'''
Project: pelisalacarta-ce    Author: pelisalacarta-ce
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with Bing searches
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follow refresh 0 but don't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    #br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    print response
    if not ".ftrH,.ftrHd,.ftrD>" in response:
        r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/"+url)
        print "prooooxy"
        response = r.read()
    return response
Project: relational-social-media-search-engine    Author: indervirbanipal
def login_into_facebook(self, creds_file):
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        cookie_jar = cookielib.LWPCookieJar()
        # browser.set_proxies({"http": "111.11.11.11"})
        browser.set_cookiejar(cookie_jar)
        browser.set_handle_equiv(True)
        browser.set_handle_redirect(True)
        browser.set_handle_referer(True)
        browser.set_handle_robots(False)
        browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
        browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.517.41 Safari/534.7')]
        browser.open("https://www.facebook.com/")
        browser.select_form(nr=0)
        filepath = os.path.join(os.path.dirname(__file__), creds_file)
        f = open(filepath, "r")
        data = f.readlines()
        creds = []
        for line in data:
            if line:
                line = line.strip()
                creds.append(line.split(" ")[0] + " " + line.split(" ")[1])
        random_line = random.choice(creds)
        browser.form["email"] = random_line.split(" ")[0]
        browser.form["pass"] = random_line.split(" ")[1]
        print "login: ", browser.form["email"], " :: ", browser.form["pass"]
        browser.submit()
        return browser
Project: python-hacklib    Author: lazorfuzz
def _login_mechanize(self):
        try:
            import mechanize
        except ImportError:
            raise MissingPackageException('Please install the mechanize module before continuing.')
        # Sets up common input names/ids and creates instance of mechanize.Browser()
        userfields = ['user', 'username', 'usr', 'email', 'name', 'login', 'userid', 'userid-input', 'player']
        passfields = ['pass', 'password', 'passwd', 'pw', 'pwd']
        br = mechanize.Browser()
        br.set_handle_robots(False)
        br.set_handle_refresh(False)
        br.addheaders = [('User-agent', 'googlebot')]
        # Opens URL and lists controls
        response = br.open(self.url)
        loginurl = response.geturl()
        br.form = list(br.forms())[0]
        username_control = ''
        password_control = ''
        # Locates username and password input, and submits login info
        for control in br.form.controls:
            if control.name and control.name.lower() in userfields or control.id and control.id.lower() in userfields:
                username_control = control
            if control.name and control.name.lower() in passfields or control.id and control.id.lower() in passfields:
                password_control = control
        username_control.value = self.username
        try: password_control.value = self.password
        except:
            # Detected a username input but not a password input.
            # Submits form with username and attempts to detect password input in resulting page
            response = br.submit()
            br.form = list(br.forms())[0]
            for control in br.form.controls:
                if control.name and control.name.lower() in passfields or control.id and control.id.lower() in passfields:
                    password_control = control
        password_control.value = self.password
        response = br.submit()
        # Returns response if the URL is changed. Assumes login failure if URL is the same
        if response.geturl() != loginurl:
            return response.read()
        else:
            raise Exception('Login credentials incorrect.')
Project: python-hacklib    Author: lazorfuzz
def getProxies(country_filter = 'ALL', proxy_type = ('Socks4', 'Socks5')):
    '''Gets list of recently tested Socks4/5 proxies.
    Return format is as follows:
    [IP, Port, Country Code, Country, Proxy Type, Anonymous, Yes/No, Last Checked]
    Args: country_filter: Specify country codes within a tuple, e.g. ('US', 'MX')
    proxy_type: Specify which Socks version to use, e.g. 'Socks5'
    '''
    try: import mechanize
    except: raise MissingPackageException('Please install the mechanize module before continuing. Use hacklib.installDependencies()')
    try: from bs4 import BeautifulSoup
    except: raise MissingPackageException('Please install the beautifulsoup4 module before continuing. Use hacklib.installDependencies()')
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.addheaders = [('User-agent', 'googlebot')]
    data = br.open('http://www.socks-proxy.net').read()
    soup = BeautifulSoup(data, 'html.parser')
    proxylist = []
    table = soup.find('table')
    tbody = table.find('tbody')
    rows = tbody.find_all('tr')
    for row in rows:
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]
        proxylist.append([ele for ele in cols if ele])
    filteredlist = []
    if not country_filter == 'ALL':
        for proxy in proxylist:
            if proxy[2] in country_filter:
                filteredlist.append(proxy)
        proxylist = filteredlist
        filteredlist = []
    if not proxy_type == ('Socks4', 'Socks5'):
        for proxy in proxylist:
            if not country_filter == 'ALL':
                if proxy[4] in proxy_type and proxy[2] in country_filter:
                    filteredlist.append(proxy)
            else:
                if proxy[4] in proxy_type: filteredlist.append(proxy)
        proxylist = filteredlist
    return proxylist
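
A usage sketch for getProxies; the row layout follows the docstring above, and reachability of www.socks-proxy.net at runtime is assumed:

proxies = getProxies(country_filter=('US', 'MX'), proxy_type=('Socks5',))
for proxy in proxies:
    print proxy[0] + ':' + proxy[1]  # IP:Port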