This article collects typical usage examples of the os.path.commonprefix function in Python. If you have been wondering what commonprefix does, how to call it, or what real-world code using it looks like, the hand-picked examples below should help.
Twenty code examples of commonprefix are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
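Before looking at the examples, it helps to remember what os.path.commonprefix actually returns: it compares its arguments character by character (any strings, or indeed any sequences, are accepted), so the result is not necessarily an existing path. A quick illustration, alongside the component-wise alternative os.path.commonpath added in Python 3.4:

from os.path import commonprefix, commonpath

paths = ["/usr/lib/python3.8", "/usr/local/lib"]
print(commonprefix(paths))                            # '/usr/l' - longest common *character* prefix
print(commonpath(paths))                              # '/usr'   - longest common *path component* prefix
print(commonprefix(["interstellar", "interstate"]))   # 'interst' - plain strings work too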
Example 1: getFilesToBackup

def getFilesToBackup(self):
    filesToBackup = []
    for hash, files in self.currentBackupSet.items():
        if not self.previousBackupSet.has_key(hash):
            filesToBackup.append((hash, files))
        else:
            previousFiles = self.previousBackupSet[hash]
            found = False
            equals = []
            for previousEntry in previousFiles:
                for file in files:
                    base = file[0][len(path.commonprefix([file[0], self.options.source])):].lstrip("/")
                    if base in previousEntry[0]:
                        found = True
                    else:
                        equals.append(previousEntry[0])
            if not found:
                if self.options.verbose:
                    pass
                newFiles = []
                for f in files:
                    newFiles.append(f[0][len(path.commonprefix([f[0], self.options.source])):].lstrip("/"))
                print warn("Duplicate:"), "image already backed up under different name: %s == %s" % (newFiles, equals)
                if not self.options.only_hash: filesToBackup.append((hash, [[file[0]]]))
    return filesToBackup

Author: langdal, Project: imagebackup, Lines: 25, Source: image-backup-1.9.py
Example 2: longest_common_prefix_suffix

def longest_common_prefix_suffix(s1, s2):
    """
    :param s1: first string
    :param s2: second string
    :return: tuple of (common prefix of s1 and reversed s2,
             common prefix of reversed s1 and s2)
    """
    return commonprefix([s1, s2[::-1]]), commonprefix([s1[::-1], s2])

Author: yoyonel, Project: CG, Lines: 8, Source: solution_lcsp.py
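A minimal usage sketch of the helper above, with made-up input strings (the function body is repeated so the snippet runs on its own):

from os.path import commonprefix

def longest_common_prefix_suffix(s1, s2):
    return commonprefix([s1, s2[::-1]]), commonprefix([s1[::-1], s2])

# "abcde" starts with "abc", which is the suffix "cba" of "zzcba" read backwards
print(longest_common_prefix_suffix("abcde", "zzcba"))   # ('abc', '')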
Example 3: get_output_file

def get_output_file(infiles):
    prefix = path.commonprefix(infiles)
    suffix = path.commonprefix([f[::-1] for f in infiles])[::-1]
    num_suffix = ''.join([c for c in itertools.takewhile(lambda x: x.isdigit(), suffix)])
    num_prefix = ''.join([c for c in itertools.takewhile(lambda x: x.isdigit(), prefix[::-1])])[::-1]
    prefix = prefix[:len(prefix)-len(num_prefix)]
    suffix = suffix[len(num_suffix):]
    diffs = [s[len(prefix):len(s)-len(suffix)] for s in infiles]
    output_file = prefix + min(diffs, key=alphanum_key) + '-' + max(diffs, key=alphanum_key) + suffix
    return output_file

Author: hogenshpogen, Project: muon-loss, Lines: 10, Source: file_utils.py
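In effect, the function above collapses a run of numbered file names into a single range name. A rough sketch of the two commonprefix calls with made-up file names (alphanum_key in the original is a natural-sort key defined elsewhere in that project):

from os.path import commonprefix

infiles = ["run_007.dat", "run_008.dat", "run_012.dat"]
prefix = commonprefix(infiles)                            # 'run_0'
suffix = commonprefix([f[::-1] for f in infiles])[::-1]   # '.dat'
print(prefix, suffix)
# after trimming the stray digit from the prefix, get_output_file()
# would return something like 'run_007-012.dat'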
Example 4: regex_opt_inner

def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return make_charset(oneletter)
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren

Author: 2015E8014661092, Project: jinjaysnow.github.io, Lines: 54, Source: regexopt.py
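The central trick is factoring the common prefix out of an alternation so the generated regex stays small. A minimal sketch of just that step, independent of the Pygments helpers used above:

from os.path import commonprefix
from re import escape

words = ["import", "imported", "importer"]
prefix = commonprefix(words)                        # 'import'
tails = [w[len(prefix):] for w in words]            # ['', 'ed', 'er']
pattern = escape(prefix) + '(?:' + '|'.join(map(escape, tails)) + ')'
print(pattern)                                      # import(?:|ed|er)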
Example 5: compile

def test_that_a_valid_license_notice_exists_in_every_source_file_and_that_global_licensing_information_is_correct(self):
    license_notice = compile(r"""(?P<comment_start>#|--|//) This Source Code Form is subject to the terms of the Mozilla Public
(?P=comment_start) License, v\. 2\.0\. If a copy of the MPL was not distributed with this file,
(?P=comment_start) You can obtain one at http://mozilla\.org/MPL/2\.0/\.
(?P=comment_start)
(?P=comment_start) Copyright \(c\) (?P<first_year>20\d\d)(-(?P<last_year>20\d\d))?, Lars Asplund lars\.anders\[email protected]\.com""")
    log_date = compile(r'Date:\s*(?P<year>20\d\d)-\d\d-\d\d')
    licensed_files = []
    repo_root = abspath(join(dirname(__file__), '..'))
    for root, dirs, files in walk(repo_root):
        for f in files:
            if 'preprocessed' in root:
                continue
            osvvm_directory = abspath(join(repo_root, 'vhdl', 'osvvm'))
            if commonprefix([osvvm_directory, abspath(join(root, f))]) == osvvm_directory:
                continue
            osvvm_integration_example_directory = abspath(join(repo_root, 'examples', 'osvvm_integration', 'src'))
            if commonprefix([osvvm_integration_example_directory, abspath(join(root, f))]) == osvvm_integration_example_directory:
                continue
            if splitext(f)[1] in ['.vhd', '.vhdl', '.py', '.v', '.sv']:
                licensed_files.append(join(root, f))
    i = 0
    min_first_year = None
    max_last_year = None
    for f in licensed_files:
        stdout.write('\r%d/%d' % (i + 1, len(licensed_files)))
        stdout.flush()
        i += 1
        proc = Popen(['git', 'log', '--follow', '--date=short', f], \
                     bufsize=0, stdout=PIPE, stdin=PIPE, stderr=STDOUT, universal_newlines=True)
        out, _ = proc.communicate()
        first_year = None
        last_year = None
        for date in log_date.finditer(out):
            first_year = int(date.group('year')) if first_year is None else min(int(date.group('year')), first_year)
            last_year = int(date.group('year')) if last_year is None else max(int(date.group('year')), last_year)
        min_first_year = first_year if min_first_year is None else min(min_first_year, first_year)
        max_last_year = last_year if max_last_year is None else max(max_last_year, last_year)
        with open(f) as fp:
            code = fp.read()
        match = license_notice.search(code)
        self.assertIsNotNone(match, "Failed to find license notice in %s" % f)
        if first_year == last_year:
            self.assertEqual(int(match.group('first_year')), first_year, 'Expected copyright year to be %d in %s' % (first_year, f))
            self.assertIsNone(match.group('last_year'), 'Expected no copyright years range in %s' % join(root, f))
        else:
            self.assertIsNotNone(match.group('last_year'), 'Expected copyright year range %d-%d in %s' % (first_year, last_year, f))
            self.assertEqual(int(match.group('first_year')), first_year, 'Expected copyright year range to start with %d in %s' % (first_year, f))
            self.assertEqual(int(match.group('last_year')), last_year, 'Expected copyright year range to end with %d in %s' % (last_year, f))
    print('\n')

Author: tomasnilefrost, Project: vunit, Lines: 51, Source: test_license.py
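Both commonprefix checks above are an "is this file under that directory?" test: if the directory path itself is the common prefix, the file lies beneath it. A minimal sketch with made-up paths:

from os.path import commonprefix

osvvm_dir = "/repo/vhdl/osvvm"
candidate = "/repo/vhdl/osvvm/AlertLogPkg.vhd"
print(commonprefix([osvvm_dir, candidate]) == osvvm_dir)   # True -> the file is skipped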
Example 6: _check_lookahead

def _check_lookahead(self, inp):
    """
    Check a counterexample for lookahead transitions using prefix-closed
    queries. If an unknown lookahead is found it is added on the observation
    table.

    Args:
        inp (list): Counterexample input.
    """
    # Make a prefix closed membership query and gather the result
    prefix = []
    prefix_set = [[]]
    prefix_set_input = [[]]
    for c in inp:
        prefix.append(c)
        prefix_set_input.append(prefix)
        prefix_set.append(self.membership_query(prefix))
    for i in xrange(1, len(prefix_set)):
        if commonprefix([prefix_set[i], prefix_set[i-1]]) != prefix_set[i-1]:
            logging.debug('Lookahead detected at position %s : %s, %s',
                          i, prefix_set[i-1], prefix_set[i])
            la_out = _remove_common_prefix(prefix_set[i], prefix_set[i-1])
            j = None
            for j in reversed(xrange(i)):
                if commonprefix([prefix_set[i], prefix_set[j]]) == prefix_set[j]:
                    la_inp = inp[j:i]
                    break
            la_out = _remove_common_prefix(prefix_set[i], prefix_set[j])
            access_string = self._run_in_hypothesis(inp, j)
            out_as = self.membership_query(access_string)
            out_complete = self.membership_query(list(access_string) + la_inp)
            # If The access string for the lookahead state is wrong, we will
            # add the lookahead path once this is fixed in a next iteration.
            if _remove_common_prefix(out_complete, out_as) != la_out:
                logging.debug('Lookahead detected but access string is ' + \
                              'wrong, skipping.')
                continue
            if self.ot.add_lookahead_transition(access_string,
                                                tuple(la_inp),
                                                tuple(la_out)):
                # Fill all table entries for the lookahead transition
                for col in self.ot.dist_strings:
                    self._fill_ot_entry(access_string + tuple(la_inp), col)
                # New lookahead added, no need for further processing.
                break

Author: GeorgeArgyros, Project: sflearn, Lines: 49, Source: angluin_fst_lookahead.py
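Note that commonprefix is not limited to strings; here it is applied to lists of output symbols returned by the membership queries. A minimal sketch of that behavior with made-up outputs:

from os.path import commonprefix

out_prev = [0, 1, 1]      # hypothetical output for the shorter input prefix
out_cur = [0, 1, 0, 1]    # hypothetical output after one more input symbol

# The outputs are prefix-consistent only if the shorter one is a prefix of the longer one.
print(commonprefix([out_cur, out_prev]) == out_prev)   # False -> a lookahead transition is suspected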
Example 7: _normalize_path

def _normalize_path(base_dir, path):
    """Helper to check paths passed to methods of this class.

    Checks whether `path` is beneath `base_dir` and normalize it.
    Additionally paths are converted into relative paths with respect to
    `base_dir`, considering PWD in case of relative paths. This
    is intended to be used in repository classes, which means that
    `base_dir` usually will be the repository's base directory.

    Parameters
    ----------
    path: str
        path to be normalized
    base_dir: str
        directory to serve as base to normalized, relative paths

    Returns
    -------
    str:
        path, that is a relative path with respect to `base_dir`
    """
    if not path:
        return path

    base_dir = realpath(base_dir)
    # path = normpath(path)
    # Note: disabled normpath, because it may break paths containing symlinks;
    # But we don't want to realpath relative paths, in case cwd isn't the correct base.

    if isabs(path):
        # path might already be a symlink pointing to annex etc,
        # so realpath only its directory, to get "inline" with realpath(base_dir)
        # above
        path = opj(realpath(dirname(path)), basename(path))
        if commonprefix([path, base_dir]) != base_dir:
            raise FileNotInRepositoryError(msg="Path outside repository: %s"
                                               % path, filename=path)
        else:
            pass
    elif commonprefix([realpath(getpwd()), base_dir]) == base_dir:
        # If we are inside repository, rebuilt relative paths.
        path = opj(realpath(getpwd()), path)
    else:
        # We were called from outside the repo. Therefore relative paths
        # are interpreted as being relative to self.path already.
        return path

    return relpath(path, start=base_dir)

Author: WurstWorks, Project: datalad, Lines: 49, Source: gitrepo.py
Example 8: discover

def discover(directories, index, filterfunc=lambda filename: True):
    """Import and initialize modules from `directories` list.

    :param directories: list of directories
    :param index: index function"""

    def find(directories, filterfunc):
        """Discover and yield python modules (aka files that endswith .py) if
        `filterfunc` returns True for that filename."""
        for directory in directories:
            for root, dirs, files in os.walk(directory):
                for fname in files:
                    if fname.endswith('.py') and filterfunc(join(root, fname)):
                        yield join(root, fname)

    for filename in find(directories, filterfunc):
        modname, ext = os.path.splitext(os.path.basename(rchop(filename, os.sep + '__init__.py')))
        fp, path, descr = imp.find_module(modname, directories)

        prefix = commonprefix((PATH, filename))
        if prefix:
            modname = 'acrylamid.'
            modname += rchop(filename[len(prefix):].replace(os.sep, '.'), '.py')

        try:
            mod = sys.modules[modname]
        except KeyError:
            try:
                mod = imp.load_module(modname, fp, path, descr)
            except (ImportError, SyntaxError, ValueError) as e:
                log.exception('%r %s: %s', modname, e.__class__.__name__, e)
                continue

        index(mod)

Author: DebVortex, Project: acrylamid, Lines: 35, Source: helpers.py
Example 9: lookup

def lookup(self, index, word_array):
    """ Get closest match to word (accepts imperfect matches)
    :param list[str] word_array: str
    :param int index: index of word in word_array to check
    :return: closest match or None if none found
    :rtype: str
    """
    word = word_array[index]
    logging.debug("looking up in indicator dictionary: " + word)
    i = bisect_left(self.dict, word)
    nearest_matches = self.dict[i - 1: i + 1]
    # todo: return length of match as well
    for i in range(0, len(nearest_matches)):
        split = nearest_matches[i].split()
        # require multi-word indicators to match exactly
        # todo: after this, it's exact so don't use get_closest_matches
        if len(split) > 1 and \
                not self.match_multiple_words(split, word_array[index:]):
            nearest_matches[i] = ""
    match = get_close_matches(word, nearest_matches, n=1,
                              cutoff=min_indicator_distance)
    if not match:
        return None
    match = match[0]
    # todo: arbitrary, essentially checking stem of word
    if word != match and len(commonprefix([word, match])) < 3:
        return None
    logging.debug("Closest match to " + word + " is " + match)
    return match

Author: jsrozner, Project: cryptic, Lines: 34, Source: solver.py
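The final commonprefix test above is a crude "same stem?" filter: a fuzzy match is accepted only if it shares at least three leading characters with the query. A minimal sketch (the word list is made up, and 0.6 stands in for the min_indicator_distance cutoff used in the original):

from difflib import get_close_matches
from os.path import commonprefix

word = "hidden"
candidates = ["hiding", "ridden"]
match = get_close_matches(word, candidates, n=1, cutoff=0.6)[0]
print(match)                                    # 'ridden' - closest by edit similarity
print(len(commonprefix([word, match])) >= 3)    # False -> rejected, no shared stem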
Example 10: lca

def lca(scores1, scores2, tax):
    classdict = dict()
    for query, hit in scores1.iteritems():
        scr1 = set(hit.keys())
        scr2 = set(scores2[query].keys())
        # find the common hits of both dictionaries
        common = scr1.intersection(scr2)
        commonscores = dict()
        topscore = 0
        for goodhit in common:
            score = hit[goodhit] + scores2[query][goodhit]
            commonscores[goodhit] = score
            if score > topscore:
                topscore = score
        # remove from common all the scores that aren't at least 95% of topscore
        minscore = 0.95 * topscore
        topscores = commonscores.copy()
        for goodhit in commonscores:
            if commonscores[goodhit] < minscore:
                del topscores[goodhit]
        # get the LCA for these
        classify = ""
        for tophit in topscores:
            if classify == "" and tophit in tax:
                classify = str(tax[tophit])
            else:
                # print "And the common pref is " + commonprefix([classify, str(tax[tophit])])
                classify = commonprefix([classify, str(tax[tophit])])
        if classify == "" or classify == "[]":
            classify = "Unclassified;"
        # take longest substr ending in ;
        meaningful = re.match(".+;", classify)
        classify = meaningful.group()
        classdict[query] = classify
    return classdict

Author: EnvGen, Project: Tutorials, Lines: 35, Source: taxonomy_blast_parser.py
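The lowest common ancestor here is simply the character-level common prefix of the lineage strings, trimmed back to the last complete rank (the trailing ';'). A minimal sketch with made-up lineages:

import re
from os.path import commonprefix

lineages = ["Bacteria;Firmicutes;Bacilli;",
            "Bacteria;Fibrobacteres;"]
classify = commonprefix(lineages)              # 'Bacteria;Fi' - cut in the middle of a rank name
classify = re.match(".+;", classify).group()   # trim back to the last complete rank
print(classify)                                # 'Bacteria;'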
Example 11: validate_absolute_path

def validate_absolute_path(self, root, absolute_path):
    """Overrides StaticFileHandler's method to include authentication
    """
    # Get the filename (or the base directory) of the result
    len_prefix = len(commonprefix([root, absolute_path]))
    base_requested_fp = absolute_path[len_prefix:].split(sep, 1)[0]

    current_user = self.current_user

    # If the user is an admin, then allow access
    if current_user.level == 'admin':
        return super(ResultsHandler, self).validate_absolute_path(
            root, absolute_path)

    # otherwise, we have to check if they have access to the requested
    # resource
    user_id = current_user.id
    accessible_filepaths = check_access_to_analysis_result(
        user_id, base_requested_fp)

    # Turn these filepath IDs into absolute paths
    db_files_base_dir = get_db_files_base_dir()
    relpaths = filepath_ids_to_rel_paths(accessible_filepaths)
    accessible_filepaths = {join(db_files_base_dir, relpath)
                            for relpath in relpaths.values()}

    # check if the requested resource is a file (or is in a directory) that
    # the user has access to
    if join(root, base_requested_fp) in accessible_filepaths:
        return super(ResultsHandler, self).validate_absolute_path(
            root, absolute_path)
    else:
        raise QiitaPetAuthorizationError(user_id, absolute_path)

Author: zonca, Project: qiita, Lines: 34, Source: analysis_handlers.py
Example 12: printStatus

def printStatus(makeLog, makeAllLog, textTestTmp, smtpServer, out):
    failed = ""
    build = commonprefix([basename(makeLog), basename(makeAllLog)])
    print >> out, build,
    print >> out, datetime.now().ctime()
    print >> out, "--"
    print >> out, basename(makeLog)
    warnings = 0
    errors = 0
    svnLocked = False
    for l in file(makeLog):
        if ("svn: Working copy" in l and "locked" in l) or "svn: Failed" in l:
            svnLocked = True
            failed += l
        if re.search("[Ww]arn[ui]ng[: ]", l):
            warnings += 1
        if re.search("[Ee]rror[: ]", l) or re.search("[Ff]ehler[: ]", l):
            errors += 1
            failed += l
    if svnLocked:
        failed += "svn up failed\n\n"
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make failed\n\n"
    print >> out, "--"
    for root, dirs, files in os.walk(textTestTmp):
        for f in files:
            if f.startswith("batchreport"):
                b = open(join(root, f))
                l = b.readline()
                if l.startswith("FAILED") or l.startswith("succeeded") or l.startswith("killed") or l.startswith("known bugs"):
                    print >> out, f, l,
                b.close()
    print >> out, "--"
    print >> out, basename(makeAllLog)
    warnings = 0
    errors = 0
    for l in file(makeAllLog):
        if re.search("[Ww]arn[ui]ng[: ]", l):
            warnings += 1
        if "error " in l.lower():
            errors += 1
            failed += l
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make debug failed\n\n"
    print >> out, "--"
    if failed:
        fromAddr = "[email protected]"
        toAddr = "[email protected]"
        message = """From: "%s" <%s>
To: %s
Subject: Error occurred while building
%s""" % (build, fromAddr, toAddr, failed)
        server = smtplib.SMTP(smtpServer)
        server.sendmail(fromAddr, toAddr, message)
        server.quit()

Author: RamonHPSilveira, Project: urbansim, Lines: 60, Source: status.py
Example 13: flush

def flush(self):
    # FIXME:
    # - what about things like emoji zwj sequences?
    # - normalize strings to better handle combining characters?
    #
    # >>> u"C\u0327"
    # 'Ç'
    # >>> len(u"C\u0327")
    # 2
    # >>> len(unicodedata.normalize('NFC', u"C\u0327"))
    # 1
    if len(self.before.replaced_text) > len(self.after.replaced_text):
        assert self.before.replaced_text.endswith(self.after.replaced_text)
        replaced_text = self.before.replaced_text
    else:
        assert self.after.replaced_text.endswith(self.before.replaced_text)
        replaced_text = self.after.replaced_text
    before = replaced_text[:len(replaced_text)-len(self.before.replaced_text)] + self.before.appended_text
    after = replaced_text[:len(replaced_text)-len(self.after.replaced_text)] + self.after.appended_text
    common_length = len(commonprefix([before, after]))
    erased = len(before) - common_length
    if erased:
        self.output.send_backspaces(erased)
    appended = after[common_length:]
    if appended:
        self.output.send_string(appended)
    self.before.reset(self.after.trailing_space)
    self.after.reset(self.after.trailing_space)

Author: DanLanglois, Project: plover, Lines: 28, Source: formatting.py
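The idea above - emit only the backspaces and characters needed to turn the old text into the new one - boils down to a commonprefix computation. A minimal sketch with made-up strings:

from os.path import commonprefix

before, after = "testing", "tested"
common = len(commonprefix([before, after]))
backspaces = len(before) - common    # erase 'ing' -> 3 backspaces
to_type = after[common:]             # then type 'ed'
print(backspaces, repr(to_type))     # 3 'ed'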
Example 14: score_model

def score_model(self, model_txn, txn):
    """Score an existing transaction for its ability to provide a model
    for an incomplete transaction.

    Args:
      model_txn: The transaction to be scored.
      txn: The incomplete transaction.
    Returns:
      A float number representing the score, normalized in [0,1].
    """
    def get_description(txn):
        return ('{} {}'.format(txn.payee or '', txn.narration or '')).strip()

    # If the target transaction does not have a description, there is
    # nothing we can do
    txn_description = get_description(txn)
    n_max = len(txn_description)
    if n_max > 1:
        # Only consider model transactions whose posting to the target
        # account has the same sign as the transaction to be completed
        posting = [p for p in model_txn.postings if p.account == self.account][0]
        if number.same_sign(posting.units.number, txn.postings[0].units.number):
            model_txn_description = get_description(model_txn)
            n_match = len(path.commonprefix(
                [model_txn_description, txn_description]))
            score = float(n_match) / float(n_max)
            return score
    return 0

Author: fxtlabs, Project: beansoup, Lines: 28, Source: transactions.py
Example 15: __init__

def __init__(self, files, output_dir, name, format, reference, excludes):
    logger.debug('Incoming files: %s', files)

    basedir = commonprefix(files)
    logger.debug('Prefix is "%s"', basedir)

    if isdir(basedir):
        self.basedir = basedir
    else:
        self.basedir = dirname(basedir)
    logger.debug('Basedir is "%s"', self.basedir)

    self.output_dir = output_dir
    logger.debug('Output dir is "%s"', self.output_dir)

    globfiles = list(chain.from_iterable(list(map(glob, files))))
    logger.debug("Globfiles: %s", globfiles)

    for file_or_dir in globfiles:
        for walkroot, _, walkfiles in walk(file_or_dir):
            for walkfile in walkfiles:
                globfiles.append(join(walkroot, walkfile))
    logger.debug('Resolved globfiles: %s', globfiles)

    for exclude in (excludes or []):
        if exclude[-1] is not '*':
            exclude += '*'
        evicts = fnmatch.filter(globfiles, exclude)
        logger.debug("exclude '%s' evicts => %s", exclude, evicts)
        globfiles = [
            globfile for globfile in globfiles if globfile not in evicts
        ]

    relative_files = [
        r for r in
        [relpath(globfile, self.basedir) for globfile in globfiles]
        if r is not '.'
    ]
    logger.debug('Resolved relative files: %s', relative_files)

    self._files = OrderedDict.fromkeys(relative_files)
    logger.debug("Initialized map, is now %s", self._files)

    self.name = name
    self.format = format

    if not reference:
        self.refdir = self.output_dir
    elif not isdir(reference):
        self.refdir = dirname(reference)
    else:
        self.refdir = reference

Author: pombredanne, Project: hashedassets, Lines: 60, Source: map.py
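Because commonprefix works on characters, the common prefix of a list of file names is usually not an existing directory, which is what the isdir()/dirname() fallback above corrects for. A minimal sketch of the same correction (a trailing-separator check stands in for isdir() so the snippet runs without touching the filesystem):

from os.path import commonprefix, dirname

files = ["img/icon1.png", "img/icon2.png"]   # hypothetical inputs
prefix = commonprefix(files)                 # 'img/icon' - no such directory
basedir = prefix if prefix.endswith("/") else dirname(prefix)
print(basedir)                               # 'img'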
Example 16: _derive_module_name

def _derive_module_name(self, path):
    common = commonprefix([self.working, path])
    slice_module_name = slice(len(common) + 1, len(path))
    return path[slice_module_name]\
        .replace('.py', '')\
        .replace('\\', '.')\
        .replace('/', '.')

Author: hfeeki, Project: pyspecs, Lines: 7, Source: _loader.py
Example 17: lca

def lca(scores1, scores2, percent, tax):
    classdict = dict()
    for query, hit in scores1.iteritems():
        scr1 = set(hit.keys())
        scr2 = set(scores2[query].keys())
        # find the common hits of both dictionaries
        common = scr1.intersection(scr2)
        commonscores = dict()
        for goodhit in common:
            score = hit[goodhit] + scores2[query][goodhit]
            commonscores[goodhit] = score
        # get the top percent scores of this intersection
        topcommon = toppercent(commonscores, percent)
        # get the LCA for these
        classify = ''
        for query, score in topcommon.iteritems():
            if classify == '':
                classify = tax[query]
            else:
                classify = commonprefix([classify, tax[query]])
        if classify == '':
            classify = 'Unclassified;'
        # print classify
        # take longest substr ending in ;
        meaningful = re.match(".+;", classify)
        classify = meaningful.group()
        classdict[query] = classify
        # print query + "\t" + classify
    return classdict

Author: EnvGen, Project: toolbox, Lines: 29, Source: taxonomy_blast_parser.py
Example 18: __contains__

def __contains__(self, path):
    p1 = self.path
    p2 = path
    if self.IS_WINDOWS:
        p1 = p1.lower()
        p2 = p2.lower()
    return commonprefix((p1 + sep, p2)) == p1 + sep

Author: platformio, Project: platformio, Lines: 7, Source: piolib.py
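Appending sep before the comparison is what makes this containment test safe: a plain character-level prefix check would also claim that "/libs/foo-extra" lives inside "/libs/foo". A minimal sketch of the difference, with made-up POSIX paths:

from os.path import commonprefix

base, other = "/libs/foo", "/libs/foo-extra/src.c"
print(commonprefix((base, other)) == base)               # True - false positive
print(commonprefix((base + "/", other)) == base + "/")   # False - sibling correctly rejected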
Example 19: get_articles

def get_articles(request):
    query = request.GET.get('query', '')
    tq = parse_tq(request.GET.get('tq', ''))
    tqx = parse_tqx(request.GET.get('tqx', ''))

    select = Article.select()
    select = select.limit(tq.get('limit', 1))
    select = select.offset(tq.get('offset', 0))
    if query:
        select = select.where(Article.subject % ('*%s*' % query))

    subjects = [ a.subject for a in select ]
    LOG.debug(lcs(subjects))
    LOG.debug(commonprefix(subjects))

    dt = gviz.DataTable({
        'posted': ('datetime', 'Posted'),
        'poster': ('string', 'Poster'),
        'subject': ('string', 'Subject'),
        'message_id': ('string', 'ID')
    })
    dt.LoadData( a._data for a in select )
    dt_order = ['subject', 'posted', 'poster', 'message_id']
    gviz_json = dt.ToJSonResponse(req_id=tqx.get('reqId', 0),
                                  columns_order=dt_order)
    return itty.Response(gviz_json, content_type='application/json')

Author: scytrin, Project: py-nntp-indexer, Lines: 27, Source: server.py
Example 20: safejoin

def safejoin(root, subpath):
    if not SAFENAME.match(subpath):
        raise BadName(u"unsafe path name: %r" % subpath)
    path = join(root, subpath)
    if commonprefix([root + sep, path]) != root + sep:
        raise BadName(u"invalid relative path: %r" % subpath)
    return path

Author: dimagi, Project: commcare-hq, Lines: 7, Source: zipdb.py
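This is a path-traversal guard: after joining, the result must still start with root + sep. A minimal sketch of just the commonprefix part (note that join() does not normalize '..', so dot-dot escapes are presumably left to the SAFENAME pattern; what the prefix check does catch is an absolute subpath, which join() lets replace the root entirely):

from os import sep
from os.path import commonprefix, join

def is_inside(root, subpath):
    path = join(root, subpath)
    return commonprefix([root + sep, path]) == root + sep

print(is_inside("/data/archive", "reports/2020.zip"))   # True
print(is_inside("/data/archive", "/etc/passwd"))        # False - absolute subpath discards root in join()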
Note: the os.path.commonprefix examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.