This is the result of running a codemod script:
python ~/hg/contrib/codemod/codemod_nestedwith.py **/*.py
Plus a manual cleanup that removes the comment in absorb/__init__.py.
See D76 for the codemod script.
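The rewrite itself is mechanical: Python (2.7 and later) accepts several context managers in a single `with` statement, and the combined form is equivalent to the nested one because the managers are entered left to right and exited in reverse order, so the wlock-before-lock ordering is preserved. A small self-contained sketch of the before/after shape (the `ctx` helper is illustrative only, not code from this diff):

    from contextlib import contextmanager

    @contextmanager
    def ctx(name):
        # stand-in for repo.wlock() / repo.lock() / repo.transaction(...)
        print('enter', name)
        yield name
        print('exit', name)

    # before the codemod: one nested "with" per context manager
    with ctx('wlock'):
        with ctx('lock'):
            with ctx('transaction') as tr:
                print('work inside', tr)

    # after the codemod: a single "with" holding all three context managers;
    # they are still entered left to right and exited in reverse order, so
    # both versions print exactly the same thing
    with ctx('wlock'), ctx('lock'), ctx('transaction') as tr:
        print('work inside', tr)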
Automatic diff as part of commit; lint not applicable.
Automatic diff as part of commit; unit tests not applicable.
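The codemod script itself is in D76 and is not reproduced here. As a rough, stand-alone illustration of how directly nested `with` statements can be located before rewriting them, the sketch below uses Python 3's ast module; this is an assumption for illustration only, not a claim about how D76 works:

    import ast
    import sys

    def find_nested_withs(source, filename='<string>'):
        """Yield (lineno, depth) for each outermost chain of directly nested withs."""
        tree = ast.parse(source, filename)
        # a "with" is an inner link if it is the only statement in another with's body
        inner = set()
        for node in ast.walk(tree):
            if isinstance(node, ast.With) and len(node.body) == 1 \
                    and isinstance(node.body[0], ast.With):
                inner.add(node.body[0])
        # report only chain heads, so each nested group is counted once
        for node in ast.walk(tree):
            if isinstance(node, ast.With) and node not in inner:
                depth, cur = 1, node
                while len(cur.body) == 1 and isinstance(cur.body[0], ast.With):
                    cur = cur.body[0]
                    depth += 1
                if depth > 1:
                    yield node.lineno, depth

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            with open(path) as f:
                source = f.read()
            for lineno, depth in find_nested_withs(source, path):
                print('%s:%d: %d directly nested with statements' % (path, lineno, depth))

Run over a file tree, such a detector would list candidate sites like the ones rewritten below; the actual rewriting step is left to the codemod script.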
Affected files:

M  hgext3rd/absorb/__init__.py (21 lines)
M  hgext3rd/fbamend/__init__.py (14 lines)
M  hgext3rd/fbamend/movement.py (51 lines)
M  hgext3rd/fbamend/restack.py (59 lines)
M  hgext3rd/fbamend/unamend.py (51 lines)
M  hgext3rd/fixcorrupt.py (31 lines)
M  hgext3rd/pullcreatemarkers.py (5 lines)
M  hgext3rd/uncommit.py (49 lines)
M  hgext3rd/undo.py (5 lines)
M  infinitepush/__init__.py (38 lines)
M  infinitepush/backupcommands.py (18 lines)
M  treemanifest/__init__.py (6 lines)
hgext3rd/absorb/__init__.py:

     @property
     def chunkstats(self):
         """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
         return dict((path, state.chunkstats)
                     for path, state in self.fixupmap.iteritems())

     def commit(self):
         """commit changes. update self.finalnode, self.replacemap"""
-        with self.repo.wlock(): # update bookmarks
-            with self.repo.lock(): # commit
-                with self.repo.transaction('absorb') as tr:
+        with self.repo.wlock(), self.repo.lock():
+            with self.repo.transaction('absorb') as tr:
                 self._commitstack()
                 self._movebookmarks(tr)
                 if self.repo['.'].node() in self.replacemap:
                     self._moveworkingdirectoryparent()
                 if self._useobsolete:
                     self._obsoleteoldcommits()
             if not self._useobsolete: # strip must be outside transactions
                 self._stripoldcommits()
         return self.finalnode

     def printchunkstats(self):
         """print things like '1 of 2 chunk(s) applied'"""
         ui = self.ui
         chunkstats = self.chunkstats
         if ui.verbose:
             # chunkstats for each file
hgext3rd/fbamend/__init__.py:

         ui.warn(education + "\n")

 def _fixbookmarks(repo, revs):
     """Make any bookmarks pointing to the given revisions point to the
     latest version of each respective revision.
     """
     repo = repo.unfiltered()
     cl = repo.changelog
-    with repo.wlock():
-        with repo.lock():
-            with repo.transaction('movebookmarks') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction('movebookmarks') as tr:
         for rev in revs:
             latest = cl.node(common.latest(repo, rev))
             for bm in repo.nodebookmarks(cl.node(rev)):
                 repo._bookmarks[bm] = latest
         repo._bookmarks.recordchange(tr)

 ### bookmarks api compatibility layer ###

 def bmactivate(repo, mark):
     try:
         return bookmarks.activate(repo, mark)
     except AttributeError:
         return bookmarks.setcurrent(repo, mark)

 def bmactive(repo):
     try:
         return repo._activebookmark
     except AttributeError:
         return repo._bookmarkcurrent
hgext3rd/fbamend/movement.py:

         try:
             cmdutil.bailifchanged(repo)
         except error.Abort as e:
             e.hint = _("use --merge to merge uncommitted changes")
             raise
     elif opts.get('rebase', False):
         raise error.Abort(_("cannot use both --merge and --rebase"))

-    with repo.wlock():
-        with repo.lock():
+    with repo.wlock(), repo.lock():
         # Record the active bookmark, if any.
         bookmark = repo._activebookmark
         noactivate = opts.get('no_activate_bookmark', False)
         movebookmark = opts.get('move_bookmark', False)

         with repo.transaction('moverelative') as tr:
             # Find the desired changeset. May potentially perform rebase.
             try:
                 target = _findtarget(ui, repo, n, opts, reverse)
             except error.InterventionRequired:
                 # Rebase failed. Need to manually close transaction to allow
                 # `hg rebase --continue` to work correctly.
                 tr.close()
                 raise

             # Move the active bookmark if neccesary. Needs to happen before
             # we update to avoid getting a 'leaving bookmark X' message.
             if movebookmark and bookmark is not None:
                 _setbookmark(repo, tr, bookmark, target)

             # Update to the target changeset.
             commands.update(ui, repo, rev=target)

             # Print out the changeset we landed on.
             _showchangesets(ui, repo, revs=[target])

             # Activate the bookmark on the new changeset.
             if not noactivate and not movebookmark:
                 _activate(ui, repo, target)

 def _findtarget(ui, repo, n, opts, reverse):
     """Find the appropriate target changeset for `hg previous` and
     `hg next` based on the provided options. May rebase the traversed
     changesets if the rebase option is given in the opts dict.
     """
     towards = opts.get('towards')
     newest = opts.get('newest', False)
"""Repair a situation in which one or more changesets in a stack | """Repair a situation in which one or more changesets in a stack | ||||
have been obsoleted (thereby leaving their descendants in the stack | have been obsoleted (thereby leaving their descendants in the stack | ||||
unstable) by finding any such changesets and rebasing their descendants | unstable) by finding any such changesets and rebasing their descendants | ||||
onto the latest version of each respective changeset. | onto the latest version of each respective changeset. | ||||
""" | """ | ||||
if rebaseopts is None: | if rebaseopts is None: | ||||
rebaseopts = {} | rebaseopts = {} | ||||
with repo.wlock(): | with repo.wlock(), repo.lock(): | ||||
with repo.lock(): | |||||
cmdutil.checkunfinished(repo) | cmdutil.checkunfinished(repo) | ||||
cmdutil.bailifchanged(repo) | cmdutil.bailifchanged(repo) | ||||
# Find the latest version of the changeset at the botom of the | # Find the latest version of the changeset at the botom of the | ||||
# current stack. If the current changeset is public, simply start | # current stack. If the current changeset is public, simply start | ||||
# restacking from the current changeset with the assumption | # restacking from the current changeset with the assumption | ||||
# that there are non-public changesets higher up. | # that there are non-public changesets higher up. | ||||
base = repo.revs('::. & draft()').first() | base = repo.revs('::. & draft()').first() | ||||
latest = (common.latest(repo, base) if base is not None | latest = (common.latest(repo, base) if base is not None | ||||
else repo['.'].rev()) | else repo['.'].rev()) | ||||
targets = _findrestacktargets(repo, latest) | targets = _findrestacktargets(repo, latest) | ||||
with repo.transaction('restack') as tr: | with repo.transaction('restack') as tr: | ||||
# Attempt to stabilize all changesets that are or will be (after | # Attempt to stabilize all changesets that are or will be (after | ||||
# rebasing) descendants of base. | # rebasing) descendants of base. | ||||
for rev in targets: | for rev in targets: | ||||
try: | try: | ||||
common.restackonce(ui, repo, rev, rebaseopts) | common.restackonce(ui, repo, rev, rebaseopts) | ||||
except error.InterventionRequired: | except error.InterventionRequired: | ||||
tr.close() | tr.close() | ||||
raise | raise | ||||
# Ensure that we always end up on the latest version of the | # Ensure that we always end up on the latest version of the | ||||
# current changeset. Usually, this will be taken care of | # current changeset. Usually, this will be taken care of | ||||
# by the rebase operation. However, in some cases (such as | # by the rebase operation. However, in some cases (such as | ||||
# if we are on the precursor of the base changeset) the | # if we are on the precursor of the base changeset) the | ||||
# rebase will not update to the latest version, so we need | # rebase will not update to the latest version, so we need | ||||
# to do this manually. | # to do this manually. | ||||
successor = repo.revs('allsuccessors(.)').last() | successor = repo.revs('allsuccessors(.)').last() | ||||
if successor is not None: | if successor is not None: | ||||
commands.update(ui, repo, rev=successor) | commands.update(ui, repo, rev=successor) | ||||
def _findrestacktargets(repo, base): | def _findrestacktargets(repo, base): | ||||
"""Starting from the given base revision, do a BFS forwards through | """Starting from the given base revision, do a BFS forwards through | ||||
history, looking for changesets with unstable descendants on their | history, looking for changesets with unstable descendants on their | ||||
precursors. Returns a list of any such changesets, in a top-down | precursors. Returns a list of any such changesets, in a top-down | ||||
ordering that will allow all of the descendants of their precursors | ordering that will allow all of the descendants of their precursors | ||||
to be correctly rebased. | to be correctly rebased. | ||||
""" | """ |
hgext3rd/fbamend/unamend.py:

         raise error.Abort(e % len(markers))

     precnode = markers[0].precnode()
     precctx = unfi[precnode]

     if curctx.children():
         raise error.Abort(_("cannot unamend in the middle of a stack"))

-    with repo.wlock():
-        with repo.lock():
+    with repo.wlock(), repo.lock():
         repobookmarks = repo._bookmarks
         ctxbookmarks = curctx.bookmarks()
         changedfiles = []
         wctx = repo[None]
         wm = wctx.manifest()
         cm = precctx.manifest()
         dirstate = repo.dirstate
         diff = cm.diff(wm)
         changedfiles.extend(diff.iterkeys())

         tr = repo.transaction('unamend')
         with dirstate.parentchange():
             dirstate.rebuild(precnode, cm, changedfiles)

             # we want added and removed files to be shown
             # properly, not with ? and ! prefixes
             for filename, data in diff.iteritems():
                 if data[0][0] is None:
                     dirstate.add(filename)
                 if data[1][0] is None:
                     dirstate.remove(filename)

         for book in ctxbookmarks:
             repobookmarks[book] = precnode

         repobookmarks.recordchange(tr)
         obsolete.createmarkers(repo, [(curctx, (precctx,))])
         tr.close()
hgext3rd/fixcorrupt.py:

     # sync broken revisions from manifest to changelog
     if 'manifest' in badrevs:
         badlinkrev = badrevs['manifest'][1]
         badrevs['changelog'] = (badlinkrev, badlinkrev)

     # truncate revlogs
     backupprefix = '%s-' % int(time.time())
-    with repo.wlock():
-        with repo.lock():
+    with repo.wlock(), repo.lock():
         repo.destroying()
         for name, log in logs:
             rev, linkrev = badrevs[name]
             ui.write(_('%s: will lose %d revisions\n')
                      % (name, len(log) - 1 - rev))
             truncate(ui, repo, log.datafile, log.start(rev), dryrun,
                      backupprefix)
             truncate(ui, repo, log.indexfile, rev * 64, dryrun,
                      backupprefix)
         if dryrun:
             ui.write(_('re-run with --no-dryrun to fix.\n'))
         else:
             ui.write(_('fix completed. re-run to check more revisions.\n'))
         repo.destroyed()
hgext3rd/pullcreatemarkers.py:

         n = unfiltered[rev]
         diff = getdiff(n)
         if diff in landeddiffs and landeddiffs[diff].rev() != n.rev():
             tocreate.append((n, (landeddiffs[diff],)))

     if not tocreate:
         return r

-    with unfiltered.lock():
-        with unfiltered.transaction('pullcreatemarkers'):
+    with unfiltered.lock(), unfiltered.transaction('pullcreatemarkers'):
         obsolete.createmarkers(unfiltered, tocreate)

     return r
"""uncommit some or all of a local changeset | """uncommit some or all of a local changeset | ||||
This command undoes the effect of a local commit, returning the affected | This command undoes the effect of a local commit, returning the affected | ||||
files to their uncommitted state. This means that files modified or | files to their uncommitted state. This means that files modified or | ||||
deleted in the changeset will be left unchanged, and so will remain | deleted in the changeset will be left unchanged, and so will remain | ||||
modified in the working directory. | modified in the working directory. | ||||
""" | """ | ||||
with repo.wlock(): | with repo.wlock(), repo.lock(): | ||||
with repo.lock(): | |||||
wctx = repo[None] | wctx = repo[None] | ||||
if len(wctx.parents()) <= 0 or not wctx.parents()[0]: | if len(wctx.parents()) <= 0 or not wctx.parents()[0]: | ||||
raise error.Abort(_("cannot uncommit null changeset")) | raise error.Abort(_("cannot uncommit null changeset")) | ||||
if len(wctx.parents()) > 1: | if len(wctx.parents()) > 1: | ||||
raise error.Abort(_("cannot uncommit while merging")) | raise error.Abort(_("cannot uncommit while merging")) | ||||
old = repo['.'] | old = repo['.'] | ||||
oldphase = old.phase() | oldphase = old.phase() | ||||
if oldphase == phases.public: | if oldphase == phases.public: | ||||
raise error.Abort(_("cannot rewrite immutable changeset")) | raise error.Abort(_("cannot rewrite immutable changeset")) | ||||
if len(old.parents()) > 1: | if len(old.parents()) > 1: | ||||
raise error.Abort(_("cannot uncommit merge changeset")) | raise error.Abort(_("cannot uncommit merge changeset")) | ||||
with repo.transaction('uncommit') as tr: | with repo.transaction('uncommit') as tr: | ||||
match = scmutil.match(old, pats, opts) | match = scmutil.match(old, pats, opts) | ||||
newid = _commitfiltered(repo, old, match) | newid = _commitfiltered(repo, old, match) | ||||
if newid is None: | if newid is None: | ||||
raise error.Abort(_('nothing to uncommit')) | raise error.Abort(_('nothing to uncommit')) | ||||
# Move local changes on filtered changeset | # Move local changes on filtered changeset | ||||
obsolete.createmarkers(repo, [(old, (repo[newid],))]) | obsolete.createmarkers(repo, [(old, (repo[newid],))]) | ||||
phases.retractboundary(repo, tr, oldphase, [newid]) | phases.retractboundary(repo, tr, oldphase, [newid]) | ||||
with repo.dirstate.parentchange(): | with repo.dirstate.parentchange(): | ||||
repo.dirstate.setparents(newid, node.nullid) | repo.dirstate.setparents(newid, node.nullid) | ||||
_uncommitdirstate(repo, old, match) | _uncommitdirstate(repo, old, match) | ||||
_updatebookmarks(repo, old.node(), newid, tr) | _updatebookmarks(repo, old.node(), newid, tr) |
hgext3rd/undo.py:

     # record changes to repo
     safelog(repo, command)
     return result

 # Write: Log control

 def safelog(repo, command):
     if repo is not None:# some hg commands don't require repo
-        with repo.lock():
-            with repo.transaction("undolog"):
+        with repo.lock(), repo.transaction("undolog"):
             log(repo.filtered('visible'), command)

 def log(repo, command):
     newnodes = {
         'bookmarks': _logbookmarks(repo),
         'draftheads': _logdraftheads(repo),
         'workingparent': _logworkingparent(repo),
     }
     try:
infinitepush/__init__.py:

         for bookmark, hexnode in newbookmarks.iteritems():
             bookmarks[bookmark] = hexnode
         remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)

 def _savelocalbookmarks(repo, bookmarks):
     if not bookmarks:
         return
-    with repo.wlock():
-        with repo.lock():
-            with repo.transaction('bookmark') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
         for scratchbook, node in bookmarks.iteritems():
             changectx = repo[node]
             repo._bookmarks[scratchbook] = changectx.node()
         repo._bookmarks.recordchange(tr)

 def _findcommonincoming(orig, *args, **kwargs):
     common, inc, remoteheads = orig(*args, **kwargs)
     return common, True, remoteheads

 def _push(orig, ui, repo, dest=None, *args, **opts):
     bookmark = opts.get('to') or ''
     create = opts.get('create') or False

     key = None
     if newheadscount:
         with open(bundlefile, 'r') as f:
             bundledata = f.read()
             with logservicecall(log, 'bundlestore',
                                 bundlesize=len(bundledata)):
                 key = store.write(bundledata)

-    with logservicecall(log, 'index', newheadscount=newheadscount):
-        with index:
+    with logservicecall(log, 'index', newheadscount=newheadscount), index:
         if key:
             index.addbundle(key, nodesctx)
         if bookmark:
             index.addbookmark(bookmark, bookmarknode)
             _maybeaddpushbackpart(op, bookmark, bookmarknode,
                                   bookprevnode, params)
     log(scratchbranchparttype, eventtype='success',
         elapsedms=(time.time() - parthandlerstart) * 1000)
     fillmetadatabranchpattern = op.repo.ui.config(
         'infinitepush', 'fillmetadatabranchpattern', '')
     if bookmark and fillmetadatabranchpattern:
         __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
         if matcher(bookmark):

     toinsert = {}
     todelete = []
     for bookmark, node in decodedbookmarks.iteritems():
         if node:
             toinsert[bookmark] = node
         else:
             todelete.append(bookmark)
     log = _getorcreateinfinitepushlogger(op)
-    with logservicecall(log, scratchbookmarksparttype):
-        with index:
+    with logservicecall(log, scratchbookmarksparttype), index:
         if todelete:
             index.deletebookmarks(todelete)
         if toinsert:
             index.addmanybookmarks(toinsert)

 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
     if params.get('pushbackbookmarks'):
         if op.reply and 'pushback' in op.reply.capabilities:
             params = {
                 'namespace': 'bookmarks',
                 'key': bookmark,
                 'new': newnode,
infinitepush/backupcommands.py:

     pullcmd, pullopts = _getcommandandoptions('^pull')
     # pull backuped heads and nodes that are pointed by bookmarks
     pullopts['rev'] = list(backupstate.heads |
                            set(backupstate.localbookmarks.values()))
     if dest:
         pullopts['source'] = dest
     result = pullcmd(ui, repo, **pullopts)

-    with repo.wlock():
-        with repo.lock():
-            with repo.transaction('bookmark') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
         for book, hexnode in backupstate.localbookmarks.iteritems():
             if hexnode in repo:
                 repo._bookmarks[book] = bin(hexnode)
             else:
                 ui.warn(_('%s not found, not creating %s bookmark') %
                         (hexnode, book))
         repo._bookmarks.recordchange(tr)

     return result

 @command('getavailablebackups',
          [('', 'user', '', _('username, defaults to current user')),
           ('', 'json', None, _('print available backups in json format'))])
 def getavailablebackups(ui, repo, dest=None, **opts):
     other = _getremote(repo, ui, dest, **opts)
treemanifest/__init__.py:

         with mutablehistorypack(repo.ui, packpath) as hpack:
             recordmanifest(dpack, hpack, repo, mfrev1, mfrev2,
                            verify=opts.get('verify', False))

 @command('backfilltree', [
     ('l', 'limit', '10000000', _(''))
     ], _('hg backfilltree [OPTIONS]'))
 def backfilltree(ui, repo, *args, **opts):
-    with repo.wlock():
-        with repo.lock():
-            with repo.transaction('backfilltree') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction('backfilltree') as tr:
         _backfill(tr, repo, int(opts.get('limit')))

 def _backfill(tr, repo, limit):
     ui = repo.ui
     cl = repo.changelog
     mfl = repo.manifestlog
     tmfl = mfl.treemanifestlog
     treerevlog = tmfl._revlog