
Python urlparse.clear_cache Function Code Examples


This article collects and summarizes typical usage examples of the urlparse.clear_cache function in Python. If you have been wondering what exactly clear_cache does and how to use it in practice, the curated code examples below should help.



The following shows 11 code examples of the clear_cache function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
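
Before the examples, a quick illustration of what clear_cache actually does. In Python 2, urlparse memoizes parse results in a private module-level dict, and clear_cache() empties it. A minimal sketch (the _parse_cache attribute is an implementation detail, inspected here only to make the effect visible):

import urlparse

urlparse.urlsplit('http://example.com/path?q=1')
print len(urlparse._parse_cache)   # 1 -- the parse result was memoized

urlparse.clear_cache()
print len(urlparse._parse_cache)   # 0 -- the memo dict is empty again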

Example 1: dash_R_cleanup

def dash_R_cleanup(fs, ps, pic):
    import gc, copy_reg
    import sys, re, warnings  # used below; imported at module level in the original regrtest.py
    import _strptime, linecache, dircache
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()
    urllib2.install_opener(None)
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    struct._cache.clear()
    filecmp._cache.clear()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
Developer: alkorzt, Project: pypy, Lines of code: 30, Source file: regrtest.py
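
A hedged sketch of how the snapshot arguments above are produced: regrtest's -R (leak-hunting) mode records this state once before the test runs and hands it back to dash_R_cleanup between repetitions. The capture below follows the excerpt's names; the exact capture site in regrtest.py may differ by version:

import sys, warnings, copy_reg

fs = warnings.filters[:]                 # saved warning filters
ps = copy_reg.dispatch_table.copy()      # saved pickle dispatch table
pic = sys.path_importer_cache.copy()     # saved import-path cache

# ... run one repetition of the test being leak-checked ...

dash_R_cleanup(fs, ps, pic)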


Example 2: custom_scheme_redirect

import urlparse
from flask import redirect  # the original uses a Flask/werkzeug-style redirect

def custom_scheme_redirect(url_redirect):
    # urlparse.urlsplit doesn't currently handle custom schemes,
    # which we want our callback URLs to support so mobile apps can register
    # their own callback scheme handlers.
    # See http://bugs.python.org/issue9374
    # and http://stackoverflow.com/questions/1417958/parse-custom-uris-with-urlparse-python

    scheme = urlparse.urlsplit(url_redirect)[0]

    scheme_lists = [urlparse.uses_netloc, urlparse.uses_query,
                    urlparse.uses_fragment, urlparse.uses_params,
                    urlparse.uses_relative]
    scheme_lists_modified = []

    # Modify urlparse's internal scheme lists so it properly handles custom schemes
    if scheme:
        for scheme_list in scheme_lists:
            if scheme not in scheme_list:
                scheme_list.append(scheme)
                scheme_lists_modified.append(scheme_list)

    # Clear cache before re-parsing url_redirect
    urlparse.clear_cache()

    # Grab flask/werkzeug redirect result
    redirect_result = redirect(url_redirect)

    # Restore urlparse's scheme lists to their previous state
    for scheme_list in scheme_lists_modified:
        scheme_list.remove(scheme)

    return redirect_result
Developer: PaulWagener, Project: khan-website, Lines of code: 30, Source file: auth_util.py
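
A hedged demonstration of the problem this works around, on older Python 2 releases where netloc/query splitting was gated on urlparse's scheme lists (see issue 9374; the 'myapp' scheme is a made-up example):

import urlparse

# Unregistered scheme: netloc and query stay glued to the path component.
print urlparse.urlsplit('myapp://callback?code=123')

urlparse.uses_netloc.append('myapp')
urlparse.uses_query.append('myapp')
urlparse.clear_cache()  # drop the stale cached result before re-parsing

# Registered scheme: netloc and query are now split out properly.
print urlparse.urlsplit('myapp://callback?code=123')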


Example 3: url_is_acceptable

  def url_is_acceptable(self, url):
    import urlparse  # imported at module level in the original Html_cleaner.py
    parsed = urlparse.urlparse(url)

    # Work around a nasty bug: urlparse() caches parsed results and returns
    # them on future calls, and if the cache isn't cleared here, a unicode
    # string gets added to the cache, which freaks out cherrypy when it
    # independently calls urlparse() with the same URL later.
    urlparse.clear_cache()

    return parsed[0] in self.allowed_schemes
Developer: osborne6, Project: luminotes, Lines of code: 9, Source file: Html_cleaner.py
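
A hedged sketch of the cache-pollution bug described in the comment. It bites on Python 2 releases whose memoization key did not include the argument type, so a unicode URL and its str twin share one cache slot:

import urlparse

urlparse.clear_cache()
urlparse.urlparse(u'http://example.com/page')         # caches unicode components
parts = urlparse.urlparse('http://example.com/page')  # same URL as a str
# On affected versions type(parts[2]) comes back as unicode, not str;
# calling clear_cache() between the two parses avoids the stale entry.
print type(parts[2])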


Example 4: urlsplit

    def urlsplit(url, scheme='', allow_fragments=True):
        """Parse a URL into 5 components:
        <scheme>://<netloc>/<path>?<query>#<fragment>
        Return a 5-tuple: (scheme, netloc, path, query, fragment).
        Note that we don't break the components up in smaller bits
        (e.g. netloc is a single string) and we don't expand % escapes."""
        allow_fragments = bool(allow_fragments)
        key = url, scheme, allow_fragments, type(url), type(scheme)
        cached = _parse_cache.get(key, None)
        if cached:
            return cached
        if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
            clear_cache()
        netloc = query = fragment = ''
        i = url.find(':')
        if i > 0:
            if url[:i] == 'http': # optimize the common case
                scheme = url[:i].lower()
                url = url[i+1:]
                if url[:2] == '//':
                    netloc, url = _splitnetloc(url, 2)
                    if (('[' in netloc and ']' not in netloc) or
                            (']' in netloc and '[' not in netloc)):
                        raise ValueError("Invalid IPv6 URL")
                if allow_fragments and '#' in url:
                    url, fragment = url.split('#', 1)
                if '?' in url:
                    url, query = url.split('?', 1)
                v = SplitResult(scheme, netloc, url, query, fragment)
                _parse_cache[key] = v
                return v
            for c in url[:i]:
                if c not in scheme_chars:
                    break
            else:
                # make sure "url" is not actually a port number (in which case
                # "scheme" is really part of the path)
                rest = url[i+1:]
                if not rest or any(c not in '0123456789' for c in rest):
                    # not a port number
                    scheme, url = url[:i].lower(), rest

        if url[:2] == '//':
            netloc, url = _splitnetloc(url, 2)
            if (('[' in netloc and ']' not in netloc) or
                    (']' in netloc and '[' not in netloc)):
                raise ValueError("Invalid IPv6 URL")
        if allow_fragments and '#' in url:
            url, fragment = url.split('#', 1)
        if '?' in url:
            url, query = url.split('?', 1)
        v = SplitResult(scheme, netloc, url, query, fragment)
        _parse_cache[key] = v
        return v
Developer: CollinsMuiruri, Project: Instagram, Lines of code: 54, Source file: _compat.py
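
The excerpt above is a backport and leans on several module-level names. A hedged reconstruction of that surrounding context (the real _compat.py may wire these up differently; the constants mirror Python 2's own urlparse.py):

from urlparse import SplitResult, _splitnetloc

MAX_CACHE_SIZE = 20  # same bound Python 2's urlparse.py uses internally
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')
_parse_cache = {}

def clear_cache():
    """Clear the parse cache used by the urlsplit backport above."""
    _parse_cache.clear()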


Example 5: dash_R_cleanup

def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    import gc, copy_reg
    import sys, re, warnings       # used below; module-level in the original regrtest.py
    from test import test_support  # likewise module-level in the original
    import _strptime, linecache
    dircache = test_support.import_module('dircache', deprecated=True)
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc, registry in abcs.items():
        abc._abc_registry = registry.copy()
        abc._abc_cache.clear()
        abc._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()
    urllib2.install_opener(None)
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None

    # Collect cyclic trash.
    gc.collect()
Developer: carlosrcjunior, Project: BCC-2s13-PI4-WebCrawler, Lines of code: 52, Source file: regrtest.py


Example 6: dash_R_cleanup

def dash_R_cleanup(fs, ps, pic, abcs):
    import gc, copy_reg
    import sys, re, warnings, types  # used below; module-level in the original regrtest.py
    from test import test_support    # likewise module-level in the original
    import _strptime, linecache
    dircache = test_support.import_module('dircache', deprecated=True)
    import urlparse, urllib, urllib2, mimetypes, doctest
    import struct, filecmp
    from distutils.dir_util import _path_created

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Restore some original values.
    warnings.filters[:] = fs
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    for abc, registry in abcs.items():
        abc._abc_registry = registry.copy()
        abc._abc_cache.clear()
        abc._abc_negative_cache.clear()

    # Clear assorted module caches.
    _path_created.clear()
    re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    urllib.urlcleanup()
    urllib2.install_opener(None)
    dircache.reset()
    linecache.clearcache()
    mimetypes._default_mime_types()
    filecmp._cache.clear()
    struct._clearcache()
    doctest.master = None

    if _llvm:  # Unladen Swallow's JIT module, imported (or set to None) at module level
        code_types = (types.CodeType, types.FunctionType, types.MethodType)
        for obj in gc.get_objects():
            if isinstance(obj, code_types):
                _llvm.clear_feedback(obj)

    # Collect cyclic trash.
    gc.collect()
Developer: ianloic, Project: unladen-swallow, Lines of code: 51, Source file: regrtest.py


Example 7: _safe_urlsplit

import urlparse

def _safe_urlsplit(s):
    """the urlparse.urlsplit cache breaks if it contains unicode and
    we cannot control that.  So we force type cast that thing back
    to what we think it is.
    """
    rv = urlparse.urlsplit(s)
    # we have to check rv[2] here and not rv[1] as rv[1] will be
    # an empty bytestring in case no domain was given.
    if type(rv[2]) is not type(s):
        assert hasattr(urlparse, 'clear_cache')
        urlparse.clear_cache()
        rv = urlparse.urlsplit(s)
        assert type(rv[2]) is type(s)
    return rv
Developer: kyleconroy, Project: uricore, Lines of code: 14, Source file: wkz_urls.py


Example 8: cleanup

def cleanup():
    # fs, ps and pic are captured from the enclosing scope; gc, sre,
    # copy_reg and sys are imported at module level in the original.
    import _strptime, urlparse, warnings, dircache
    from distutils.dir_util import _path_created
    _path_created.clear()
    warnings.filters[:] = fs
    gc.collect()
    sre.purge()  # pre-2.5 spelling of re.purge()
    _strptime._regex_cache.clear()
    urlparse.clear_cache()
    copy_reg.dispatch_table.clear()
    copy_reg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    dircache.reset()
Developer: BackupTheBerlios, Project: pyasynchio-svn, Lines of code: 14, Source file: regrtest.py


Example 9: trace_memory_clean_caches

    def trace_memory_clean_caches(self):
        """ Avoid polluting results with some builtin python caches """
        # urlparse, re, linecache, copy_reg, fnmatch, encodings and the
        # context module are imported at module level in the original job.py.

        urlparse.clear_cache()
        re.purge()
        linecache.clearcache()
        copy_reg.clear_extension_cache()

        if hasattr(fnmatch, "purge"):
            fnmatch.purge()  # pylint: disable=no-member
        elif hasattr(fnmatch, "_purge"):
            fnmatch._purge()

        if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
            encodings._cache = {}

        context.log.handler.flush()
Developer: IAlwaysBeCoding, Project: mrq, Lines of code: 17, Source file: job.py
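
A hedged sketch of why a memory tracer wants this: builtin caches retain objects across jobs, so a before/after object-count diff is only meaningful once they are cleared (gc.get_objects() is used purely for illustration here):

import gc, re, urlparse, linecache

before = len(gc.get_objects())
# ... run the job being traced ...
urlparse.clear_cache()
re.purge()
linecache.clearcache()
gc.collect()
after = len(gc.get_objects())
print 'net objects retained by the job:', after - before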


Example 10: test_urlparse

    def test_urlparse(self):
        """
        For a given URL, L{http.urlparse} should behave the same as
        L{urlparse}, except it should always return C{str}, never C{unicode}.
        """
        def urls():
            for scheme in ('http', 'https'):
                for host in ('example.com',):
                    for port in (None, 100):
                        for path in ('', 'path'):
                            if port is not None:
                                host = host + ':' + str(port)
                                yield urlunsplit((scheme, host, path, '', ''))


        def assertSameParsing(url, decode):
            """
            Verify that C{url} is parsed into the same objects by both
            L{http.urlparse} and L{urlparse}.
            """
            urlToStandardImplementation = url
            if decode:
                urlToStandardImplementation = url.decode('ascii')
            standardResult = urlparse(urlToStandardImplementation)
            scheme, netloc, path, params, query, fragment = http.urlparse(url)
            self.assertEqual(
                (scheme, netloc, path, params, query, fragment),
                standardResult)
            self.assertTrue(isinstance(scheme, str))
            self.assertTrue(isinstance(netloc, str))
            self.assertTrue(isinstance(path, str))
            self.assertTrue(isinstance(params, str))
            self.assertTrue(isinstance(query, str))
            self.assertTrue(isinstance(fragment, str))

        # With caching, unicode then str
        clear_cache()
        for url in urls():
            assertSameParsing(url, True)
            assertSameParsing(url, False)

        # With caching, str then unicode
        clear_cache()
        for url in urls():
            assertSameParsing(url, False)
            assertSameParsing(url, True)

        # Without caching
        for url in urls():
            clear_cache()
            assertSameParsing(url, True)
            clear_cache()
            assertSameParsing(url, False)
Developer: MatthewTurk, Project: codenode, Lines of code: 53, Source file: test_http.py


Example 11: processURL


#......... (part of this code has been omitted) .........
            return title.string
        else:
            return ''


    def getLinkedPages(soup, u, domains):
        newPaths = []
        anchors = soup.findAll('a')
        for a in anchors:
            try:
                href = a['href']
            except KeyError:
                continue

            scheme, host, port, path = my_parse(href)

            if scheme in ('http', 'https', '') and host in domains:
                if path == '' or path[0] != '/':
                    # relative path
                    pathList = u.pathList()[:-1] 
                    currpath = '/'.join(pathList) 
                    if currpath:
                        currpath = '/' + currpath
                    path = currpath + '/' + path
                    path = n_url.normURLPath(path)

                args = n_url.URL.fromString(path).queryList()
                path = '/'+'/'.join(n_url.URL.fromString(path).pathList())
                query = ''
                for arg in args: 
                    if arg[0] in ['page']:
                        query = '?page=%s' % arg[1]
                path = path.encode('ascii')
                path = urllib.quote(path)+query.encode('ascii')
                newPaths.append(path)
            else:
#                print '** Ignore', href
                pass

        return newPaths


    def getSectionAndSummary(soup):
        if id is None:
            return 'any', ''
        summary = soup.findAll('div', attrs={'id':id})
        text = summary[0].findAll(lambda tag: hasattr(tag,'string') and tag.string is not None)
        #for t in text:
            #if t.name in ['h1','h2','h3','h4','strong']:
                #print '***',t.string
            #else:
                #print '---',t.string

        if text:
            summary = ' .'.join( [t.string for t in text] )
            section = 'any'
            summary = re.sub( '\s+', ' ', summary)
            #print 'storing', section, ',',summary
            return section, summary[:300]

        return 'any', ''


    def gotPage(page, factory):

        u = n_url.URL.fromString(factory.url)

        if not page.startswith('<!DOCTYPE'):
            # Don't like the look of this url so I won't try and process it
            return factory.url, []
        soup = BeautifulSoup(page)
        title = getTitle(soup)
        content = getIndexableContent(soup)
        newPaths = getLinkedPages(soup, u, domains)
        section, summary = getSectionAndSummary(soup)

        #print '****'
        #print '>> URL', factory.url
        #print '>> content', content

        args = u.queryList()
        query = ''
        for arg in args: 
            if arg[0] in ['page']:
                query = '?page=%s'%arg[1]
        key = '/' + '/'.join(u.pathList()) + query

        if query == '':
            hypeIndex.addDocument(key, title, section, summary, content)

        return key, newPaths
                


    urlparse.clear_cache()
    factory = getPage(urlToGet)
    d = factory.deferred
    d.addCallback(gotPage, factory)
    return d
Developer: timparkin, Project: into-the-light, Lines of code: 101, Source file: index_static.py



Note: The urlparse.clear_cache function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the code remains with the original authors. For redistribution and use, please consult the corresponding project's license; do not reproduce without permission.

