Details
- Reviewers: None
- Group Reviewers: hg-reviewers
- Commits: rHG566daffc607d: cleanup: use set literals where possible
Diff Detail
- Repository: rHG Mercurial
- Lint: Lint Skipped
- Unit: Unit Tests Skipped
Path | Lines
---|---
M hgext/fastannotate/commands.py | 6
M hgext/largefiles/basestore.py | 2
M hgext/mq.py | 4
M hgext/rebase.py | 4
M hgext/remotefilelog/__init__.py | 2
M hgext/remotefilelog/repack.py | 4
M hgext/show.py | 2
M hgext/uncommit.py | 2
M mercurial/bundle2.py | 2
M mercurial/cmdutil.py | 5
M mercurial/dagop.py | 2
M mercurial/exchange.py | 4
M mercurial/hbisect.py | 2
M mercurial/help.py | 2
M mercurial/merge.py | 6
M mercurial/obsolete.py | 2
M mercurial/revlogutils/deltas.py | 2
M mercurial/wireprotov1server.py | 2
Commit | Parents | Author | Summary | Date
---|---|---|---|---
rHG566daffc607d | | Martin von Zweigbergk | cleanup: use set literals where possible | Apr 3 2019, 2:21 PM
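
The change itself is mechanical: every `set([...])` call is rewritten to the equivalent set literal or set comprehension, which avoids building a throwaway list and reads more directly. A minimal sketch of the three shapes the patch touches (the names here are illustrative, not from the patch):

```python
# Empty set: Python has no empty-set literal, because {} is an empty
# dict, so set([]) becomes set() rather than {}.
empty = set()

# One or more known elements: set([x]) becomes the literal {x}.
noop_actions = {'k'}

# Comprehension: set([f(x) for x in xs]) becomes {f(x) for x in xs},
# skipping the intermediate list entirely.
squares = {n * n for n in range(5)}

assert empty == set()
assert noop_actions == {'k'}
assert squares == {0, 1, 4, 9, 16}
```

Set literals and comprehensions require Python 2.7+, which Mercurial had long required by this point, so the rewrite is safe everywhere it applies.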
hgext/fastannotate/commands.py

```diff
                 continue
             if showlines:
                 result, lines = result

             formatter.write(result, lines, existinglines=existinglines)
     formatter.end()

-_newopts = set([])
-_knownopts = set([opt[1].replace('-', '_') for opt in
-                  (fastannotatecommandargs[r'options'] + commands.globalopts)])
+_newopts = set()
+_knownopts = {opt[1].replace('-', '_') for opt in
+              (fastannotatecommandargs[r'options'] + commands.globalopts)}

 def _annotatewrapper(orig, ui, repo, *pats, **opts):
     """used by wrapdefault"""
     # we need this hack until the obsstore has 0.0 seconds perf impact
     if ui.configbool('fastannotate', 'unfilteredrepo'):
         repo = repo.unfiltered()

     # treat the file as text (skip the isbinary check)
```
hgext/largefiles/basestore.py

```diff
                 if key not in verified:
                     verified.add(key)
                     expectedhash = lfutil.readasstandin(fctx)
                     filestocheck.append((cset, filename, expectedhash))

         failed = self._verifyfiles(contents, filestocheck)

         numrevs = len(verified)
-        numlfiles = len(set([fname for (fname, fnode) in verified]))
+        numlfiles = len({fname for (fname, fnode) in verified})
         if contents:
             self.ui.status(
                 _('verified contents of %d revisions of %d largefiles\n')
                 % (numrevs, numlfiles))
         else:
             self.ui.status(
                 _('verified existence of %d revisions of %d largefiles\n')
                 % (numrevs, numlfiles))
```
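
The rewritten line counts how many distinct largefile names the verified (filename, filenode) pairs cover, while `numrevs` counts the pairs themselves. A tiny illustration with invented data:

```python
# Two revisions of big.bin plus one of huge.iso: 3 pairs, 2 files.
verified = {('big.bin', 'node1'), ('big.bin', 'node2'),
            ('huge.iso', 'node3')}
numrevs = len(verified)
numlfiles = len({fname for (fname, fnode) in verified})
assert (numrevs, numlfiles) == (3, 2)
```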
hgext/mq.py

```diff
                     msg = ''
                 self.ui.write(patchname, label='qseries.' + state)
                 self.ui.write(': ')
                 self.ui.write(msg, label='qseries.message.' + state)
             else:
                 self.ui.write(patchname, label='qseries.' + state)
             self.ui.write('\n')

-        applied = set([p.name for p in self.applied])
+        applied = {p.name for p in self.applied}
         if length is None:
             length = len(self.series) - start
         if not missing:
             if self.ui.verbose:
                 idxwidth = len("%d" % (start + length - 1))
             for i in pycompat.xrange(start, start + length):
                 patch = self.series[i]
                 if patch in applied:
```

```diff
 revsetpredicate = registrar.revsetpredicate()

 @revsetpredicate('mq()')
 def revsetmq(repo, subset, x):
     """Changesets managed by MQ.
     """
     revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
-    applied = set([repo[r.node].rev() for r in repo.mq.applied])
+    applied = {repo[r.node].rev() for r in repo.mq.applied}
     return smartset.baseset([r for r in subset if r in applied])

 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]

 def extsetup(ui):
     # Ensure mq wrappers are called first, regardless of extension load order by
     # NOT wrapping in uisetup() and instead deferring to init stage two here.
```
hgext/rebase.py

```diff
     `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
     without a successor in destination.

     `obsoleteextinctsuccessors` is a set of obsolete revisions with only
     obsolete successors.
     """
     obsoletenotrebased = {}
-    obsoletewithoutsuccessorindestination = set([])
-    obsoleteextinctsuccessors = set([])
+    obsoletewithoutsuccessorindestination = set()
+    obsoleteextinctsuccessors = set()

     assert repo.filtername is None
     cl = repo.changelog
     nodemap = cl.nodemap
     extinctrevs = set(repo.revs('extinct()'))
     for srcrev in rebaseobsrevs:
         srcnode = cl.node(srcrev)
         # XXX: more advanced APIs are required to handle split correctly
```
hgext/remotefilelog/__init__.py

```diff
 def gcclient(ui, cachepath):
     # get list of repos that use this cache
     repospath = os.path.join(cachepath, 'repos')
     if not os.path.exists(repospath):
         ui.warn(_("no known cache at %s\n") % cachepath)
         return

     reposfile = open(repospath, 'rb')
-    repos = set([r[:-1] for r in reposfile.readlines()])
+    repos = {r[:-1] for r in reposfile.readlines()}
     reposfile.close()

     # build list of useful files
     validrepos = []
     keepkeys = set()

     sharedcache = None
     filesrepacked = False
```
hgext/remotefilelog/repack.py

```diff
         return files

     # This only considers datapacks today, but we could broaden it to include
     # historypacks.
     VALIDEXTS = [".datapack", ".dataidx"]

     # Either an oversize index or datapack will trigger cleanup of the whole
     # pack:
-    oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files
-        if (stat.st_size > maxsize and (os.path.splitext(path)[1]
-                                        in VALIDEXTS))])
+    oversized = {os.path.splitext(path)[0] for path, ftype, stat in files
+        if (stat.st_size > maxsize and (os.path.splitext(path)[1]
+                                        in VALIDEXTS))}

     for rootfname in oversized:
         rootpath = os.path.join(folder, rootfname)
         for ext in VALIDEXTS:
             path = rootpath + ext
             repo.ui.debug('removing oversize packfile %s (%s)\n' %
                           (path, util.bytecount(os.stat(path).st_size)))
             os.unlink(path)
```
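
The comprehension in this hunk strips each oversize file down to its extension-less root name, so a single oversize `.datapack` or `.dataidx` marks the whole pack for removal. A standalone sketch of the same grouping, with file sizes invented for illustration:

```python
import os

VALIDEXTS = [".datapack", ".dataidx"]
maxsize = 100

# (path, size) pairs standing in for the (path, ftype, stat) tuples
# the real code receives from the file listing.
files = [("abc123.datapack", 500), ("abc123.dataidx", 10),
         ("def456.datapack", 50), ("def456.dataidx", 5)]

oversized = {os.path.splitext(path)[0] for path, size in files
             if size > maxsize and os.path.splitext(path)[1] in VALIDEXTS}

# Only abc123.datapack exceeded the limit, but both abc123.* files
# would be cleaned up.
assert oversized == {"abc123"}
```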
hgext/show.py

```diff
     # merge or rebase targets.
     if basectx:
         # TODO make this customizable?
         newheads = set(repo.revs('heads(%d::) - %ld - not public()',
                                  basectx.rev(), stackrevs))
     else:
         newheads = set()

-    allrevs = set(stackrevs) | newheads | set([baserev])
+    allrevs = set(stackrevs) | newheads | {baserev}
     nodelen = longestshortest(repo, allrevs)

     try:
         cmdutil.findcmd('rebase', commands.table)
         haverebase = True
     except (error.AmbiguousCommand, error.UnknownCommand):
         haverebase = False
```
hgext/uncommit.py

```diff
         eligible = set(s.added) | set(s.modified) | set(s.removed)

         badfiles = set(match.files()) - eligible

         # Naming a parent directory of an eligible file is OK, even
         # if not everything tracked in that directory can be
         # uncommitted.
         if badfiles:
-            badfiles -= set([f for f in util.dirs(eligible)])
+            badfiles -= {f for f in util.dirs(eligible)}

         for f in sorted(badfiles):
             if f in s.clean:
                 hint = _(b"file was not changed in working directory "
                          b"parent")
             elif repo.wvfs.exists(f):
                 hint = _(b"file was untracked in working directory parent")
             else:
```
mercurial/bundle2.py

```diff
             commonnodes.add(cl.node(r))
     if commonnodes:
         # XXX: we should only send the filelogs (and treemanifest). user
         # already has the changelog and manifest
         packer = changegroup.getbundler(cgversion, repo,
                                         oldmatcher=oldmatcher,
                                         matcher=newmatcher,
                                         fullnodes=commonnodes)
-        cgdata = packer.generate(set([nodemod.nullid]), list(commonnodes),
+        cgdata = packer.generate({nodemod.nullid}, list(commonnodes),
                                  False, 'narrow_widen', changelog=False)

         part = bundler.newpart('changegroup', data=cgdata)
         part.addparam('version', cgversion)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

     return bundler
```
mercurial/cmdutil.py

```diff
     files is a list of files which are direct child of this directory

     subdirs is a dictionary of sub-directory name as the key and it's own
     dirnode object as the value
     """

     def __init__(self, dirpath):
         self.path = dirpath
-        self.statuses = set([])
+        self.statuses = set()
         self.files = []
         self.subdirs = {}

     def _addfileindir(self, filename, status):
         """Add a file in this directory as a direct child."""
         self.files.append((filename, status))

     def addfile(self, filename, status):
```

```diff
         date = dateutil.makedate()
         datemaydiffer = True
     else:
         date = old.date()

     if len(old.parents()) > 1:
         # ctx.files() isn't reliable for merges, so fall back to the
         # slower repo.status() method
-        files = set([fn for st in base.status(old)[:3]
-                     for fn in st])
+        files = {fn for st in base.status(old)[:3] for fn in st}
     else:
         files = set(old.files())

     # add/remove the files to the working copy if the "addremove" option
     # was specified.
     matcher = scmutil.match(wctx, pats, opts)
     relative = scmutil.anypats(pats, opts)
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
```
mercurial/dagop.py

```diff
     Returns a set of revision numbers that are DAG heads within the passed
     subset.

     ``nullrev`` is never included in the returned set, even if it is provided in
     the input set.
     """
     headrevs = set(revs)
-    parents = set([node.nullrev])
+    parents = {node.nullrev}
     up = parents.update

     for rev in revs:
         up(parentsfn(rev))

     headrevs.difference_update(parents)
     return headrevs

 def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
```
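
The `headrevs` logic above is compact enough to trace by hand: seed `parents` with `nullrev`, union in every revision's parents, and whatever was never named as a parent is a head. A toy re-run of that algorithm on a hand-built DAG (the `toyparents` mapping is invented for illustration):

```python
nullrev = -1

# A chain 0 -> 1 -> 2 with a branch 1 -> 3: revisions 2 and 3 are heads.
toyparents = {0: (nullrev, nullrev), 1: (0, nullrev),
              2: (1, nullrev), 3: (1, nullrev)}

revs = [0, 1, 2, 3]
headrevs = set(revs)
parents = {nullrev}
for rev in revs:
    parents.update(toyparents[rev])

# Anything that appears as somebody's parent cannot be a head, and
# nullrev was excluded up front by seeding parents with it.
headrevs.difference_update(parents)
assert headrevs == {2, 3}
```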
mercurial/exchange.py

```diff
     ui.debug("checking for updated bookmarks\n")
     ancestors = ()
     if pushop.revs:
         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
         ancestors = repo.changelog.ancestors(revnums, inclusive=True)

     remotebookmark = listkeys(remote, 'bookmarks')

-    explicit = set([repo._bookmarks.expandname(bookmark)
-                    for bookmark in pushop.bookmarks])
+    explicit = {repo._bookmarks.expandname(bookmark)
+                for bookmark in pushop.bookmarks}

     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

     def safehex(x):
         if x is None:
             return x
         return hex(x)
```
mercurial/hbisect.py

```diff
     Otherwise 'number' indicates the remaining possible candidates for
     the search and 'nodes' contains the next bisect target.
     'good' is True if bisect is searching for a first good changeset, False
     if searching for a first bad one.
     """
     changelog = repo.changelog
     clparents = changelog.parentrevs
-    skip = set([changelog.rev(n) for n in state['skip']])
+    skip = {changelog.rev(n) for n in state['skip']}

     def buildancestors(bad, good):
         badrev = min([changelog.rev(n) for n in bad])
         ancestors = collections.defaultdict(lambda: None)
         for rev in repo.revs("descendants(%ln) - ancestors(%ln)", good, good):
             ancestors[rev] = []
         if ancestors[badrev] is None:
             return badrev, None
```
mercurial/help.py

```diff
                         ' to show complete help)')
         indicateomitted(rst, omitted)

     if mod:
         try:
             ct = mod.cmdtable
         except AttributeError:
             ct = {}
-        modcmds = set([c.partition('|')[0] for c in ct])
+        modcmds = {c.partition('|')[0] for c in ct}
         rst.extend(helplist(modcmds.__contains__))
     else:
         rst.append(_("(use 'hg help extensions' for information on enabling"
                      " extensions)\n"))
     return rst

 def helpextcmd(name, subtopic=None):
     cmd, ext, doc = extensions.disabledcmd(ui, name,
```
mercurial/merge.py

```diff
                     path = checkunknowndirs(repo, wctx, f)
                     if path is not None:
                         pathconflicts.add(path)
             elif m == ACTION_LOCAL_DIR_RENAME_GET:
                 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                     fileconflicts.add(f)

         allconflicts = fileconflicts | pathconflicts
-        ignoredconflicts = set([c for c in allconflicts
-                                if repo.dirstate._ignore(c)])
+        ignoredconflicts = {c for c in allconflicts
+                            if repo.dirstate._ignore(c)}
         unknownconflicts = allconflicts - ignoredconflicts
         collectconflicts(ignoredconflicts, ignoredconfig)
         collectconflicts(unknownconflicts, unknownconfig)
     else:
         for f, (m, args, msg) in actions.iteritems():
             if m == ACTION_CREATED_MERGE:
                 fl2, anc = args
                 different = _checkunknownfile(repo, wctx, mctx, f)
```

```diff
 def _filternarrowactions(narrowmatch, branchmerge, actions):
     """
     Filters out actions that can ignored because the repo is narrowed.

     Raise an exception if the merge cannot be completed because the repo is
     narrowed.
     """
-    nooptypes = set(['k'])  # TODO: handle with nonconflicttypes
+    nooptypes = {'k'}  # TODO: handle with nonconflicttypes
     nonconflicttypes = set('a am c cm f g r e'.split())
     # We mutate the items in the dict during iteration, so iterate
     # over a copy.
     for f, action in list(actions.items()):
         if narrowmatch(f):
             pass
         elif not branchmerge:
             del actions[f]  # just updating, ignore changes outside clone
```
mercurial/obsolete.py

```diff
         direct = set()
         for current in pendingnodes:
             direct.update(precursorsmarkers.get(current, ()))
             pruned = [m for m in children.get(current, ()) if not m[1]]
             direct.update(pruned)
             pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
             direct.update(pruned)
         direct -= seenmarkers
-        pendingnodes = set([m[0] for m in direct])
+        pendingnodes = {m[0] for m in direct}
         seenmarkers |= direct
         pendingnodes -= seennodes
         seennodes |= pendingnodes
     return seenmarkers

 def makestore(ui, repo):
     """Create an obsstore instance from a repo."""
     # read default format for new obsstore.
```
mercurial/revlogutils/deltas.py

```diff
     deltalength = revlog.length
     deltaparent = revlog.deltaparent
     sparse = revlog._sparserevlog
     good = None

     deltas_limit = textlen * LIMIT_DELTA2TEXT

-    tested = set([nullrev])
+    tested = {nullrev}
     candidates = _refinedgroups(revlog, p1, p2, cachedelta)
     while True:
         temptative = candidates.send(good)
         if temptative is None:
             break

         group = []
         for rev in temptative:
             # skip over empty delta (no need to include them in a chain)
```
mercurial/wireprotov1server.py

```diff
       A bundle can be applied only if all its base revisions are known by
       the client.
     - At least one leaf of the bundle's DAG is missing on the client.
     - Every leaf of the bundle's DAG is part of node set the client wants.
       E.g. do not send a bundle of all changes if the client wants only
       one specific branch of many.
     """
     def decodehexstring(s):
-        return set([binascii.unhexlify(h) for h in s.split(';')])
+        return {binascii.unhexlify(h) for h in s.split(';')}

     manifest = repo.vfs.tryread('pullbundles.manifest')
     if not manifest:
         return None

     res = exchange.parseclonebundlesmanifest(repo, manifest)
     res = exchange.filterclonebundleentries(repo, res)
     if not res:
         return None
```
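
`decodehexstring` turns a `;`-separated string of hex-encoded node ids into a set of binary nodes; the rewrite only swaps the `set([...])` wrapper for a comprehension. A quick standalone check of the same shape (the two-byte inputs are made up for illustration, and `b';'` is spelled as bytes since Mercurial strings are bytes):

```python
import binascii

def decodehexstring(s):
    # Each ;-separated hex chunk becomes one binary element of the set.
    return {binascii.unhexlify(h) for h in s.split(b';')}

assert decodehexstring(b'00ff;ff00') == {b'\x00\xff', b'\xff\x00'}
```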