• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python urllib2.build_opener函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中urllib2.build_opener函数的典型用法代码示例。如果您正苦于以下问题:Python build_opener函数的具体用法?Python build_opener怎么用?Python build_opener使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了build_opener函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: send

    def send(self, uri, data=''):
        """POST (or plain GET when *data* is empty) to base_url + uri.

        Builds a cookie-aware urllib2 opener (with wire-level HTTP debugging
        when self.debug is set) and raises FetionError(400) on any URL error.
        """
        url = self.base_url + str(uri)
        req = urllib2.Request(url)
        # Lazily create the shared cookie jar on first use.
        if self.cookie == '':
            self.cookie = cookielib.CookieJar()

        cookie_handler = urllib2.HTTPCookieProcessor(self.cookie)

        if self.debug:
            # debuglevel=1 makes urllib2 print the raw HTTP traffic.
            http_handler = urllib2.HTTPHandler(debuglevel=1)
            opener = urllib2.build_opener(cookie_handler, http_handler)
        else:
            opener = urllib2.build_opener(cookie_handler)

        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        req.add_header('Cache-Control', 'no-cache')
        req.add_header('Accept', '*/*')
        req.add_header('Connection', 'close')
        # Attach urlencoded POST data (add_data is the Python 2 urllib2 API).
        if data:
            post_data = urllib.urlencode(data)
            req.add_data(post_data)
            req.add_header('Content-Length', len(post_data))
        try:
            response = opener.open(req)
        except urllib2.URLError as error:
            # FIX: the original had an unreachable exit() after this raise.
            raise FetionError(400)
开发者ID:lyplcr,项目名称:pyfetion,代码行数:30,代码来源:pyfetion.py


示例2: getResponseMixedData

	def getResponseMixedData(self, url, secureToken, dic, additionalOptions=None):
		"""Perform a REST call whose body is mixed data such as
		multipart/form-data and return the parsed JSON response.

		Raises TelekomException built from the server's JSON error body
		on HTTP errors.
		"""
		# check whether proxy is given
		# NOTE(review): this tests for a module-level name "proxy" but then
		# reads self.config.proxy - looks inconsistent; confirm which source
		# of the proxy setting is intended.
		if "proxy" in globals():
			proxy_handler = urllib2.ProxyHandler(self.config.proxy)
			opener = urllib2.build_opener(proxy_handler)
			urllib2.install_opener(opener)
				
		# Install a multipart-capable opener globally; this affects every
		# later urllib2.urlopen() call in the process, not just this one.
		multipart = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
		urllib2.install_opener(multipart)
		
		req = urllib2.Request(url, dic.parameters())

		# OAuth bearer token plus SDK identification headers.
		req.add_header('Authorization', self.config.SDK_AUTH+",oauth_token=\""+secureToken+"\"")
		req.add_header('User-Agent', self.config.SDK_VERSION)
		req.add_header('Accept', 'application/json')
		
		# sets additional header fields supplied by the caller
		if additionalOptions != None:
			for key in additionalOptions:
				req.add_header(key, additionalOptions[key])
		
		try:
			response = urllib2.urlopen(req)
			
			response = json.loads(response.read())	
			
			return response
		
		except urllib2.HTTPError as e:
			
			raise TelekomException(json.loads(e.read()))
开发者ID:tschubotz,项目名称:box_fetch,代码行数:33,代码来源:TelekomJSONService.py


示例3: __init__

    def __init__(self, server_url, user_id, device_id, client_version,
                 proxies=None, proxy_exceptions=None,
                 password=None, token=None, repository="default",
                 ignored_prefixes=None, ignored_suffixes=None,
                 timeout=20, blob_timeout=None, cookie_jar=None,
                 upload_tmp_dir=None):
        """Initialize the automation client.

        Stores connection settings, resolves the ignored prefix/suffix lists
        (falling back to module defaults), normalizes the server URL, sets up
        password- or token-based auth, and builds two urllib2 openers (a
        plain one and a streaming one) sharing the same cookie jar and proxy
        handler.  Ends by calling self.fetch_api() to discover the server's
        operations.
        """
        self.timeout = timeout
        self.blob_timeout = blob_timeout
        # Fall back to module-level defaults when no explicit ignore lists
        # are supplied.
        if ignored_prefixes is not None:
            self.ignored_prefixes = ignored_prefixes
        else:
            self.ignored_prefixes = DEFAULT_IGNORED_PREFIXES

        if ignored_suffixes is not None:
            self.ignored_suffixes = ignored_suffixes
        else:
            self.ignored_suffixes = DEFAULT_IGNORED_SUFFIXES

        # Uploads are staged in the system temp dir unless told otherwise.
        self.upload_tmp_dir = (upload_tmp_dir if upload_tmp_dir is not None
                               else tempfile.gettempdir())

        # Normalize so URL joins below can simply concatenate.
        if not server_url.endswith('/'):
            server_url += '/'
        self.server_url = server_url

        # TODO: actually use the repository info in the requests
        self.repository = repository

        self.user_id = user_id
        self.device_id = device_id
        self.client_version = client_version
        # Either password- or token-based auth; handled by _update_auth.
        self._update_auth(password=password, token=token)

        self.cookie_jar = cookie_jar
        cookie_processor = urllib2.HTTPCookieProcessor(
            cookiejar=cookie_jar)

        # Get proxy handler
        proxy_handler = get_proxy_handler(proxies,
                                          proxy_exceptions=proxy_exceptions,
                                          url=self.server_url)

        # Build URL openers; the streaming one adds the extra handlers
        # returned by get_handlers().
        self.opener = urllib2.build_opener(cookie_processor, proxy_handler)
        self.streaming_opener = urllib2.build_opener(cookie_processor,
                                                     proxy_handler,
                                                     *get_handlers())

        # Set Proxy flag: true when any ProxyHandler in the chain actually
        # carries proxy entries.
        self.is_proxy = False
        for handler in self.opener.handlers:
            if isinstance(handler, ProxyHandler):
                if handler.proxies:
                    self.is_proxy = True

        self.automation_url = server_url + 'site/automation/'
        self.batch_upload_url = 'batch/upload'
        self.batch_execute_url = 'batch/execute'

        self.fetch_api()
开发者ID:gabytamb,项目名称:nuxeo-drive,代码行数:60,代码来源:base_automation_client.py


示例4: get_html

    def get_html(self):
        """Fetch every URL in self.get_urls and return the list of bodies.

        Uses a cookie-enabled opener, optionally routed through a proxy
        picked at random from self.agents, and re-encodes bodies whose
        detected charset is in self.zh_code from GBK to UTF-8.
        """

        # add cookie support
        cookie = cookielib.CookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(cookie)

        if self.agents:
            agent = choice(self.agents)
        else:
            agent = None

        # add proxy support
        # NOTE(review): despite the name, `agent` is used as an HTTP proxy
        # address here, not as a User-Agent string - confirm self.agents
        # really holds proxy endpoints.
        if agent:
            proxy_handler = urllib2.ProxyHandler({'http': agent})
            # proxy_handler = urllib2.ProxyHandler({'https': agent})
            opener = urllib2.build_opener(cookie_handler, proxy_handler)
        else:
            opener = urllib2.build_opener(cookie_handler)

        urllib2.install_opener(opener)
        try:
            datas = []
            for url in self.get_urls:
                req = urllib2.Request(url, headers=self.header)
                html = urllib2.urlopen(req, timeout=30).read()
                # add chinese support: normalize detected Chinese encodings
                # to UTF-8
                code = chardet.detect(html)['encoding']
                if code in self.zh_code:
                    html = html.decode('GBK').encode('utf-8')
                datas.append(html)
            return datas
        except Exception as e:
            raise Exception(e)
开发者ID:killingwolf,项目名称:python_learning,代码行数:33,代码来源:baidu_keyword.py


示例5: serveFile

	def serveFile(self, fURL, sendData, httphandler = None):
		cj = cookielib.LWPCookieJar(ustvpaths.COOKIE) 
		if httphandler is None:
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		else:
			opener = urllib2.build_opener(httphandler, urllib2.HTTPCookieProcessor(cj))
		request = urllib2.Request(url = fURL)
		opener.addheaders = []
		d = {}
		sheaders = self.decodeHeaderString(''.join(self.headers.headers))
		for key in sheaders:
			d[key] = sheaders[key]
			if (key != 'Host'):
				opener.addheaders = [(key, sheaders[key])]
			if (key == 'User-Agent'):
				opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0')]
		if os.path.isfile(ustvpaths.COOKIE):
			cj.load(ignore_discard = True)
			cj.add_cookie_header(request)
		response = opener.open(request, timeout = TIMEOUT)
		self.send_response(200)
		headers = response.info()
		for key in headers:
			try:
				val = headers[key]
				self.send_header(key, val)
			except Exception, e:
				print e
				pass
开发者ID:MossyTC,项目名称:plugin.video.ustvvod,代码行数:29,代码来源:proxy.py


示例6: __init__

    def __init__ (self, base, params, user=None, password=None):
        """Set up the WMS client: normalize *base* so parameters can be
        appended, build an (optionally basic-auth) urllib2 opener, and seed
        self.params from defaults, *params* and the base URL."""
        self.base    = base
        # Make sure the base URL ends in '?' or '&' so "key=value" pairs
        # can be concatenated directly.
        if self.base[-1] not in "?&":
            if "?" in self.base:
                self.base += "&"
            else:
                self.base += "?"

        self.params  = {}
        if user is not None and password is not None:
            # FIX: removed a redundant build_opener() call whose result was
            # immediately overwritten by the authenticated opener below.
            x = urllib2.HTTPPasswordMgrWithDefaultRealm()
            x.add_password(None, base, user, password)
            auth = urllib2.HTTPBasicAuthHandler(x)
            self.client  = urllib2.build_opener(auth)
        else:
            self.client  = urllib2.build_opener()

        # Defaults apply only when the base URL does not already carry the
        # parameter.
        for key, val in self.defaultParams.items():
            if self.base.lower().rfind("%s=" % key.lower()) == -1:
                self.params[key] = val
        for key in self.fields:
            # FIX: dict.has_key() is removed in Python 3; `in` works in both.
            if key in params:
                self.params[key] = params[key]
            elif self.base.lower().rfind("%s=" % key.lower()) == -1:
                self.params[key] = ""
开发者ID:llerradyoh,项目名称:tilecache,代码行数:26,代码来源:Client.py


示例7: getResult

    def getResult(self, ip, cookieHandle, fileID):
        try:

            user_agent = random.choice(self.user_agents)
            proxy = urllib2.ProxyHandler({'http':''+ ip +''})
            opener = urllib2.build_opener(proxy)
            opener.addheaders = [
                ('User_agent',user_agent),
                ('Referer','http://www.sufile.com/down/'+fileID+'.html'),
                ('Host','www.sufile.com'),
                ('DNT','1')
            ]

            opener = urllib2.build_opener(cookieHandle)
            r = opener.open('http://www.sufile.com/dd.php?file_key='+fileID+'&p=0', timeout=10)
            d = r.read()

            with open('./result.html', 'wb') as f:
                f.write(d)


            p = re.compile('<a id="downs" href="(.*?)"', re.S)
            r = re.search(p, d)
            print r.group(1).strip()


        except urllib2.HTTPError, e:
            print 'HTTPError: ' + str(e.code)
            return False
开发者ID:belloving,项目名称:py1,代码行数:29,代码来源:open2.py


示例8: refresh_feed

    def refresh_feed(self, rssurl):
        """
        Parses through the content of rss feed, using a proxy, if configured,
        uses cache for the feed content if memcached is in use.

        :param str rssurl: URL to RSS Feed
        :returns: List of RSS entries
        """
        headers = []

        opener = urllib2.build_opener()
        proxy = self.http_proxy

        # If proxy set, rebuild the opener with custom handlers that route
        # requests of the proxy URL's scheme through it.
        if proxy:
            urlinfo = urlparse(proxy)
            proxyhandler = urllib2.ProxyHandler({urlinfo.scheme : proxy})
            opener = urllib2.build_opener(proxyhandler, urllib2.HTTPHandler, urllib2.HTTPSHandler)

        # TODO: Use feedparser
        xml = minidom.parse(opener.open(rssurl))
        if xml:
            root = xml.documentElement
            # <item> elements may sit directly under the document root or be
            # nested inside a <channel> element (RSS 2.0); handle both.
            for node in root.childNodes:
                if node.nodeName == "item":
                    headers.append(self.get_header(node))
                if node.nodeName == "channel":
                    for channel_child in node.childNodes:
                        if channel_child.nodeName == "item":
                            headers.append(self.get_header(channel_child))

        return headers
开发者ID:alvabai,项目名称:trac-multiproject,代码行数:32,代码来源:rss_macro.py


示例9: send_web_socket

def send_web_socket(Cookie_Jar,url_to_call):
    """Fire a WebSocket upgrade handshake at url_to_call using the cookies
    in Cookie_Jar.  Always returns ''; any failure is printed via traceback
    and swallowed."""
    try:
        import urllib2
        import base64
        import uuid
        req = urllib2.Request(url_to_call)

        # Sec-WebSocket-Key: a base64-encoded GUID, as the handshake requires.
        str_guid=str(uuid.uuid1()).upper()
        str_guid=base64.b64encode(str_guid)
        req.add_header('Connection', 'Upgrade')
        req.add_header('Upgrade', 'websocket')

        req.add_header('Sec-WebSocket-Key', str_guid)
        req.add_header('Origin','http://www.streamafrik.com')
        req.add_header('Pragma','no-cache')
        req.add_header('Cache-Control','no-cache')
        req.add_header('Sec-WebSocket-Version', '13')
        req.add_header('Sec-WebSocket-Extensions', 'permessage-deflate; client_max_window_bits, x-webkit-deflate-frame')
        req.add_header('User-Agent','Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53')
        cookie_handler = urllib2.HTTPCookieProcessor(Cookie_Jar)
        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
        # FIX: install_opener() returns None; the original rebound `opener`
        # to that None value.
        urllib2.install_opener(opener)
        from keepalive import HTTPHandler
        keepalive_handler = HTTPHandler()
        opener = urllib2.build_opener(keepalive_handler)
        urllib2.install_opener(opener)
        # FIX: capture the response so it can actually be closed; the
        # original called close() on an undefined name, raising a NameError
        # that the bare except silently swallowed.
        response = urllib2.urlopen(req)
        response.close()
        return ''
    except: traceback.print_exc(file=sys.stdout)
    return ''
开发者ID:bilbiten,项目名称:ShaniXBMCWork,代码行数:31,代码来源:genericPlayer.py


示例10: getOpener

 def getOpener(self):
     """Build and return a cookie-enabled urllib2 opener, routed through
     the configured HTTP proxy when one is set."""
     jar = cookielib.CookieJar()
     handlers = [urllib2.HTTPCookieProcessor(jar)]
     if self.__proxy is not None:
         handlers.insert(0, urllib2.ProxyHandler({"http": self.__proxy}))
     return urllib2.build_opener(*handlers)
开发者ID:asevans48,项目名称:CrawlerAids,代码行数:7,代码来源:GetPage.py


示例11: doLogin

def doLogin(adminHash):
	sys.stdout.write("(+) Logging into CMS.. ")
	sys.stdout.flush
	adminIndex = "http://" + options.target + options.dirPath + "openedit/authentication/logon.html"
	values = {'loginokpage' : '', 'accountname' : 'admin', 'password' : adminHash, 'submit' : 'Login'}
	data = urllib.urlencode(values)
	cj = CookieJar()
    	if options.proxy:
        	try:
            		opener = urllib2.build_opener(getProxy(), urllib2.HTTPCookieProcessor(cj))
                        opener.addheaders = [('User-agent', agent)]
	            	check = opener.open(adminIndex, data).read()
        	except:
            		print "\n(-) Proxy connection failed to remote target"
            		sys.exit(1)
    	else:
        	try:
            		opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            		check = opener.open(adminIndex, data).read()
            	except:
            		print "(-) Target connection failed, check your address"
            		sys.exit(1)
	if not re.search("Please enter your password", check):
        	sys.stdout.write("logged in successfully\n")
        	sys.stdout.flush()
		return cj
    	else:
        	sys.stdout.write("Login Failed! Exiting..\n")
        	sys.stdout.flush()
        	sys.exit(1)
开发者ID:0x24bin,项目名称:exploit-database,代码行数:30,代码来源:16157.py


示例12: continuity

def continuity(url):
    import md5
    format = '%25s: %s'

    # first fetch the file with the normal http handler
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('normal urllib', m.hexdigest())

    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)

    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive read', m.hexdigest())

    fo = urllib2.urlopen(url)
    foo = ''
    while 1:
        f = fo.readline()
        if f: foo = foo + f
        else: break
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive readline', m.hexdigest())
开发者ID:intech,项目名称:sqlmap,代码行数:32,代码来源:keepalive.py


示例13: call_service

    def call_service(self):
        """调用远程服务"""
        try:
            encode_data = None
            if self.params is not None:
                if self.method == 'GET':
                    self.url += '?' + urlencode(self.params)
                    log_debug(self.url)

                elif self.method == 'POST':
                    encode_data = urlencode(self.params)

            opener = urllib2.build_opener()
            opener.addheaders = self.headers
            
            if self.cookie_jar is not None:
                opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))

            res_obj = opener.open(self.url, data=encode_data, timeout=self.timeout)
            self.set_cookie = res_obj.info().getheader('Set-Cookie')
            self.res = res_obj.read()

            # encoding
            self.encoding = guess_json_utf(self.res)
            if self.encoding:
                self.res = self.res.decode(self.encoding)

            self.json = json.loads(self.res)
            self.ret  = self.json.get('ret')
            self.msg  = self.json.get('msg')
            self.data = self.json.get('data')
        except Exception, e:
            #log_error('[JSONService] url:%s, response:%s, expetion:%s' % (self.url, self.res, e))
            return False
开发者ID:newagemusic,项目名称:b,代码行数:34,代码来源:net.py


示例14: loadUrl

def loadUrl(url, profiler, enable_proxy = False):
    loadtime = 0
    try:
        begin = time.time()
        req = urllib2.Request(url)
        req.add_header("User-Agent", "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36")
        req.add_header("Accept-Encoding", "gzip,deflate,sdch")
        req.add_header("Accept", "*/*")
        req.add_header("Cache-Control", "no-cache")
        if enable_proxy:
            print "USE Turbo Proxy!!!"
            proxy_handler = urllib2.ProxyHandler({"http": turbo_local_proxy})
            opener =  urllib2.build_opener(proxy_handler)
        else:
            opener =  urllib2.build_opener()
        resp = opener.open(req, timeout = 1000000)
        cntype = resp.headers.getheader("content-type")
        print "content-type", cntype
        print "status code", resp.getcode()
        # print "headers", resp.headers
        size = len(resp.read())
        loadtime = time.time() - begin
        print "page size", size
        print "loadtime is ", loadtime
        profiler.addSize(size)
        profiler.addRescources(url, resp.getcode() ,cntype, int(loadtime * 1000))
        return loadtime
    except ValueError:
        pass
    finally:
        opener.close()
开发者ID:gr8lakes,项目名称:proxytest,代码行数:31,代码来源:main.py


示例15: get_urllib_object

def get_urllib_object(uri, timeout, headers=None, verify_ssl=True, data=None):
    """Return a urllib2 object for `uri` and `timeout` and `headers`.

    This is better than using urlib2 directly, for it handles SSL verifcation, makes
    sure URI is utf8, and is shorter and easier to use.  Modules may use this
    if they need a urllib2 object to execute .read() on.

    For more information, refer to the urllib2 documentation.

    """

    uri = quote_query(uri)
    merged_headers = {'Accept': '*/*', 'User-Agent': 'Mozilla/5.0 (Willie)'}
    if headers is not None:
        # FIX: the original merged the caller's headers into the defaults but
        # then sent the caller's dict alone, silently dropping the default
        # Accept/User-Agent entries; send the merged dict instead.
        merged_headers.update(headers)
    if verify_ssl:
        opener = urllib2.build_opener(VerifiedHTTPSHandler)
    else:
        opener = urllib2.build_opener()
    req = urllib2.Request(uri, headers=merged_headers, data=data)
    try:
        u = opener.open(req, None, timeout)
    except urllib2.HTTPError as e:
        # Even when there's an error (say HTTP 404), return page contents
        return e.fp

    return u
开发者ID:Haus1,项目名称:willie,代码行数:29,代码来源:web.py


示例16: notify

def notify(field, args):
    """POST *args* as a JSON document to the peer service's endpoint for
    *field*; does nothing when no peer or no matching endpoint is configured.
    Traces to /tmp/ticket_listener when debug is set and authenticates with
    HTTP basic auth when use_htaccess is enabled."""
    if peer == '':
        return
    #args['token'] = token
    if debug:
        log = open('/tmp/ticket_listener', 'a')
        log.writelines('in \n')
    # FIX: dict.has_key() is removed in Python 3; `in` works in both.
    if field not in functions:
        # FIX: the original returned here without closing the debug log.
        if debug:
            log.close()
        return
    url = peer + functions[field]
    postData = json.dumps(args)
    if use_htaccess:
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, htaccess['login'], htaccess['pwd'])
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
    else:
        opener = urllib2.build_opener()
    req = urllib2.Request(url, postData, {'Content-Type': 'application/json'})
    urllib2.install_opener(opener)
    res = urllib2.urlopen(req)
    if debug:
        log.writelines('url : %s\nPOST data : %s\nresponse : %s\ninfo : %s' % (url, str(args), res.read(), res.info()))
        log.writelines('out \n')
        log.close()
开发者ID:nyuhuhuu,项目名称:trachacks,代码行数:25,代码来源:PlanetForgePubSub.py


示例17: testCreateItem

 def testCreateItem(self):
     """ Ensure that items can be created.

     Mocks the HTTPS layer twice: first to serve an item template, then to
     return a 403 so the POST payload can be inspected through the raised
     UserNotAuthorised exception.
     """
     # first, retrieve an item template
     my_opener = urllib2.build_opener(MyHTTPSHandler(self.item_templt, 200))
     z.urllib2.install_opener(my_opener)
     zot = z.Zotero('myuserID', 'myuserkey')
     t = zot.item_template('book')
     # Update the item type
     t['itemType'] = 'journalArticle'
     # Add keys which should be removed before the data is sent
     t['key'] = 'KEYABC123'
     t['etag'] = 'TAGABC123'
     t['group_id'] = 'GROUPABC123'
     t['updated'] = '14 March, 2011'
     # new opener which will return 403
     my_opener = urllib2.build_opener(MyHTTPSHandler(self.items_doc, 403))
     z.urllib2.install_opener(my_opener)
     with self.assertRaises(z.ze.UserNotAuthorised) as e:
         _ = zot.create_items([t])
     exc = str(e.exception)
     # this test is a kludge; we're checking the POST data in the 403 response:
     # the item type must have been sent, the stripped keys must not.
     self.assertIn("journalArticle", exc)
     self.assertNotIn("KEYABC123", exc)
     self.assertNotIn("TAGABC123", exc)
     self.assertNotIn("GROUPABC123", exc)
     self.assertNotIn("updated", exc)
开发者ID:avram,项目名称:pyzotero,代码行数:27,代码来源:tests.py


示例18: get_urlopen

def get_urlopen():
    """Return a callable for opening URLs that honours the proxy settings.

    Depending on the 'proxy_type' preference this is either plain
    urllib2.urlopen or the .open method of an opener wired with the
    manually configured proxy (plus proxy basic auth when enabled) or the
    proxy taken from the process environment.
    """
    proxy_type = get_prefs('proxy_type')
    if proxy_type == 'http':
        # Manually configured proxy from the preferences.
        proxy_url = 'http' + '://' + str(get_prefs('proxy_host')) + ':' + str(get_prefs('proxy_port'))
        handlers = [urllib2.ProxyHandler({'http': proxy_url, 'https': proxy_url})]
        if get_prefs('proxy_auth'):
            auth_handler = urllib2.ProxyBasicAuthHandler()
            auth_handler.add_password(None, proxy_url,
                                      str(get_prefs('proxy_auth_name')),
                                      str(get_prefs('proxy_auth_password')))
            handlers.append(auth_handler)
        return urllib2.build_opener(*handlers).open
    if proxy_type == 'system':
        # Proxy from the environment; lower-case variable wins.
        env_url = os.environ.get('http_proxy') or os.environ.get('HTTP_PROXY') or None
        if not env_url:
            return urllib2.urlopen
        proxy_support = urllib2.ProxyHandler({'http': env_url, 'https': env_url})
        return urllib2.build_opener(proxy_support).open
    return urllib2.urlopen
开发者ID:luislobo,项目名称:Hotot,代码行数:32,代码来源:agent.py


示例19: send

 def send(self, req):
     """Send *req* over urllib2 and time it for time-to-last-byte stats.

     Builds an opener according to HTTP_DEBUG / COOKIES_ENABLED, issues a
     POST or GET depending on req.method, and records connect/end
     timestamps.  An httplib.HTTPException is converted into an
     ErrorResponse with code 0 and empty content instead of propagating.
     """
     # req is our own Request object
     if HTTP_DEBUG:
         opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar), urllib2.HTTPHandler(debuglevel=1))
     elif COOKIES_ENABLED:
         opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
     else:
         opener = urllib2.build_opener()
     if req.method.upper() == 'POST':
         request = urllib2.Request(req.url, req.body, req.headers)
     else:  
         request = urllib2.Request(req.url, None, req.headers)  # urllib2 assumes a GET if no data is supplied.  PUT and DELETE are not supported
     
     # timed message send+receive (TTLB)
     req_start_time = self.default_timer()
     try:
         resp = opener.open(request)  # this sends the HTTP request and returns as soon as it is done connecting and sending
         connect_end_time = self.default_timer()
         content = resp.read()
         req_end_time = self.default_timer()
     except httplib.HTTPException, e:  # this can happen on an incomplete read, just catch all HTTPException
         connect_end_time = self.default_timer()
         # Synthesize a failure response so callers see a uniform shape.
         resp = ErrorResponse()
         resp.code = 0
         resp.msg = str(e)
         resp.headers = {}
         content = ''
开发者ID:DrLoboto,项目名称:pylt,代码行数:27,代码来源:engine.py


示例20: BuildURLOpener

def BuildURLOpener(server):
    """
    Build a urllib2 opener for *server*, honouring its proxy settings.

    If no proxy should be used, an empty ProxyHandler is installed - only
    necessary on Windows, where IE proxy settings are otherwise picked up
    automatically if available; on UNIX $HTTP_PROXY will be used.
    The MultipartPostHandler is needed for submitting multipart forms from
    Opsview.
    """
    # trying with changed digest/basic auth order as some digest auth servers do not
    # seem to work with the previous way
    if str(server.use_proxy) == "False":
        # Empty ProxyHandler forces a direct connection.
        server.proxy_handler = urllib2.ProxyHandler({})
        urlopener = urllib2.build_opener(server.digest_handler,\
                                         server.basic_handler,\
                                         server.proxy_handler,\
                                         urllib2.HTTPCookieProcessor(server.Cookie),\
                                         MultipartPostHandler)
    elif str(server.use_proxy) == "True":
        if str(server.use_proxy_from_os) == "True":
            # No explicit ProxyHandler: urllib2 falls back to the proxy
            # configuration of the operating system / environment.
            urlopener = urllib2.build_opener(server.digest_handler,\
                                             server.basic_handler,\
                                             urllib2.HTTPCookieProcessor(server.Cookie),\
                                             MultipartPostHandler)
        else:
            # if the OS proxy is not used, add an authenticated proxy handler
            server.passman.add_password(None, server.proxy_address, server.proxy_username, server.proxy_password)
            server.proxy_handler = urllib2.ProxyHandler({"http": server.proxy_address, "https": server.proxy_address})
            server.proxy_auth_handler = urllib2.ProxyBasicAuthHandler(server.passman)
            urlopener = urllib2.build_opener(server.proxy_handler,\
                                            server.proxy_auth_handler,\
                                            server.digest_handler,\
                                            server.basic_handler,\
                                            urllib2.HTTPCookieProcessor(server.Cookie),\
                                            MultipartPostHandler)
    # NOTE(review): if server.use_proxy stringifies to neither "True" nor
    # "False", urlopener is unbound here and this raises NameError - confirm
    # callers always set it to one of those values.
    return urlopener
开发者ID:catharsis,项目名称:Nagstamon,代码行数:34,代码来源:Actions.py



注:本文中的urllib2.build_opener函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python urllib2.parse_http_list函数代码示例发布时间:2022-05-27
下一篇:
Python urllib2.addinfourl函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap