This is the result of running:
python contrib/codemod/codemod_nestedwith.py **/*.py
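The codemod script itself is not included in this diff; as a sketch of the rewrite it performs, here is the before/after of the first lfutil.py hunk below: a with statement whose body is just another with statement is collapsed into a single statement listing both context managers, and the body is dedented one level.

# before the codemod: one context manager per nested with statement
with open(src, 'rb') as srcf:
    with util.atomictempfile(dest) as dstf:
        for chunk in util.filechunkiter(srcf):
            dstf.write(chunk)

# after the codemod: a single with statement, body dedented one level
with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
    for chunk in util.filechunkiter(srcf):
        dstf.write(chunk)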
Reviewers: martinvonz, durin42, hg-reviewers
Automatic diff as part of commit; lint not applicable.
Automatic diff as part of commit; unit tests not applicable.
Suggestion: use "Whitespace Changes: Ignore All" to view the diff: https://phab.mercurial-scm.org/D77?vs=on&id=121&whitespace=ignore-all#toc
Queued.
Not sure if it's worth hanging on to the script to perform the edits. Thoughts?
Maybe put the script in the commit message, then? I ran it on fb-hgext, so I guess other extension developers might want to run it on their repos too.
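For anyone running the script on their own repo: the rewrite should be behavior-preserving, since Python defines a multi-manager with statement as equivalent to the nested form (context managers are entered left to right and exited in reverse order). A minimal self-contained check of that equivalence, using illustrative names that are not part of the codemod:

import contextlib

@contextlib.contextmanager
def manager(name, log):
    # record enter/exit order so the two spellings can be compared
    log.append('enter %s' % name)
    try:
        yield
    finally:
        log.append('exit %s' % name)

nested, combined = [], []
with manager('outer', nested):
    with manager('inner', nested):
        pass
with manager('outer', combined), manager('inner', combined):
    pass
assert nested == combined == ['enter outer', 'enter inner',
                              'exit inner', 'exit outer']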
Path | Packages
---|---
M hgext/largefiles/lfutil.py (14 lines) |
M mercurial/cmdutil.py (277 lines) |
M mercurial/debugcommands.py (5 lines) |
M mercurial/upgrade.py (56 lines) |
hgext/largefiles/lfutil.py

 def link(src, dest):
     """Try to create hardlink - if that fails, efficiently make a copy."""
     util.makedirs(os.path.dirname(dest))
     try:
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fallback on atomic copy
-        with open(src, 'rb') as srcf:
-            with util.atomictempfile(dest) as dstf:
-                for chunk in util.filechunkiter(srcf):
-                    dstf.write(chunk)
+        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
+            for chunk in util.filechunkiter(srcf):
+                dstf.write(chunk)
         os.chmod(dest, os.stat(src).st_mode)

 def usercachepath(ui, hash):
     '''Return the correct location in the "global" largefiles cache for a file
     with the given hash.
     This cache is used for sharing of largefiles across repositories - both
     to preserve download bandwidth and storage space.'''
     return os.path.join(_usercachedir(ui), hash)

...

     largefile exists in the cache).'''
     wvfs = repo.wvfs
     path = findfile(repo, hash)
     if path is None:
         return False
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
-    with open(path, 'rb') as srcfd:
-        with wvfs(filename, 'wb') as destfd:
-            gothash = copyandhash(
-                util.filechunkiter(srcfd), destfd)
+    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
+        gothash = copyandhash(
+            util.filechunkiter(srcfd), destfd)
     if gothash != hash:
         repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                      % (filename, path, gothash))
         wvfs.unlink(filename)
         return False
     return True

 def copytostore(repo, ctx, file, fstandin):
mercurial/cmdutil.py (the body of the merged with block is dedented one level; that whitespace-only change is left unmarked in the context below)

     # marker creation requires that the current user's name is specified.
     if obsolete.isenabled(repo, obsolete.createmarkersopt):
         ui.username() # raise exception if username not set
     ui.note(_('amending changeset %s\n') % old)
     base = old.p1()
     newid = None
-    with repo.wlock(), repo.lock():
-        with repo.transaction('amend') as tr:
+    with repo.wlock(), repo.lock(), repo.transaction('amend') as tr:
         # See if we got a message from -m or -l, if not, open the editor
         # with the message of the changeset to amend
         message = logmessage(ui, opts)
         # ensure logfile does not conflict with later enforcement of the
         # message. potential logfile content has been processed by
         # `logmessage` anyway.
         opts.pop('logfile')
         # First, do a regular commit to record all changes in the working
         # directory (if there are any)
         ui.callhooks = False
         activebookmark = repo._bookmarks.active
         try:
             repo._bookmarks.active = None
             opts['message'] = 'temporary amend commit for %s' % old
             node = commit(ui, repo, commitfunc, pats, opts)
         finally:
             repo._bookmarks.active = activebookmark
             repo._bookmarks.recordchange(tr)
             ui.callhooks = True
         ctx = repo[node]
         # Participating changesets:
         #
         # node/ctx o - new (intermediate) commit that contains changes
         #          |   from working dir to go into amending commit
         #          |   (or a workingctx if there were no changes)
         #          |
         # old      o - changeset to amend
         #          |
         # base     o - parent of amending changeset
         # Update extra dict from amended commit (e.g. to preserve graft
         # source)
         extra.update(old.extra())
         # Also update it from the intermediate commit or from the wctx
         extra.update(ctx.extra())
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
             # slower repo.status() method
             files = set([fn for st in repo.status(base, old)[:3]
                          for fn in st])
         else:
             files = set(old.files())
         # Second, we use either the commit we just did, or if there were no
         # changes the parent of the working directory as the version of the
         # files in the final amend commit
         if node:
             ui.note(_('copying changeset %s to %s\n') % (ctx, base))
             user = ctx.user()
             date = ctx.date()
             # Recompute copies (avoid recording a -> b -> a)
             copied = copies.pathcopies(base, ctx)
             if old.p2:
                 copied.update(copies.pathcopies(old.p2(), ctx))
             # Prune files which were reverted by the updates: if old
             # introduced file X and our intermediate commit, node,
             # renamed that file, then those two files are the same and
             # we can discard X from our list of files. Likewise if X
             # was deleted, it's no longer relevant
             files.update(ctx.files())
             files = [f for f in files if not samefile(f, ctx, base)]
             def filectxfn(repo, ctx_, path):
                 try:
                     fctx = ctx[path]
                     flags = fctx.flags()
                     mctx = context.memfilectx(repo,
                                               fctx.path(), fctx.data(),
                                               islink='l' in flags,
                                               isexec='x' in flags,
                                               copied=copied.get(path))
                     return mctx
                 except KeyError:
                     return None
         else:
             ui.note(_('copying changeset %s to %s\n') % (old, base))
             # Use version of files as in the old cset
             def filectxfn(repo, ctx_, path):
                 try:
                     return old.filectx(path)
                 except KeyError:
                     return None
             user = opts.get('user') or old.user()
             date = opts.get('date') or old.date()
         editform = mergeeditform(old, 'commit.amend')
         editor = getcommiteditor(editform=editform,
                                  **pycompat.strkwargs(opts))
         if not message:
             editor = getcommiteditor(edit=True, editform=editform)
             message = old.description()
         pureextra = extra.copy()
         extra['amend_source'] = old.hex()
         new = context.memctx(repo,
                              parents=[base.node(), old.p2().node()],
                              text=message,
                              files=files,
                              filectxfn=filectxfn,
                              user=user,
                              date=date,
                              extra=extra,
                              editor=editor)
         newdesc = changelog.stripdesc(new.description())
         if ((not node)
             and newdesc == old.description()
             and user == old.user()
             and date == old.date()
             and pureextra == old.extra()):
             # nothing changed. continuing here would create a new node
             # anyway because of the amend_source noise.
             #
             # This not what we expect from amend.
             return old.node()
         ph = repo.ui.config('phases', 'new-commit', phases.draft)
         try:
             if opts.get('secret'):
                 commitphase = 'secret'
             else:
                 commitphase = old.phase()
             repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
             newid = repo.commitctx(new)
         finally:
             repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
         if newid != old.node():
             # Reroute the working copy parent to the new changeset
             repo.setparents(newid, nullid)
             mapping = {old.node(): (newid,)}
             if node:
                 mapping[node] = ()
             scmutil.cleanupnodes(repo, mapping, 'amend')
     return newid

 def commiteditor(repo, ctx, subs, editform=''):
     if ctx.description():
         return ctx.description()
     return commitforceeditor(repo, ctx, subs, editform=editform,
                              unchangedmessagedetection=True)
mercurial/debugcommands.py

         displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
         for r in revs:
             displayer.show(repo[r], **pycompat.strkwargs(props))
         displayer.close()

 @command('debugupdatecaches', [])
 def debugupdatecaches(ui, repo, *pats, **opts):
     """warm all known caches in the repository"""
-    with repo.wlock():
-        with repo.lock():
-            repo.updatecaches()
+    with repo.wlock(), repo.lock():
+        repo.updatecaches()

 @command('debugupgraderepo', [
     ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
     ('', 'run', False, _('performs an upgrade')),
 ])
 def debugupgraderepo(ui, repo, run=False, optimize=None):
     """upgrade a repository to use different features
mercurial/upgrade.py (as above, the whitespace-only dedent of the block body is left unmarked in the context below)

     # Else we're in the run=true case.
     ui.write(_('upgrade will perform the following actions:\n\n'))
     printrequirements()
     printupgradeactions()
     upgradeactions = [a.name for a in actions]
     ui.write(_('beginning upgrade...\n'))
-    with repo.wlock():
-        with repo.lock():
+    with repo.wlock(), repo.lock():
         ui.write(_('repository locked and read-only\n'))
         # Our strategy for upgrading the repository is to create a new,
         # temporary repository, write data to it, then do a swap of the
         # data. There are less heavyweight ways to do this, but it is easier
         # to create a new repo object than to instantiate all the components
         # (like the store) separately.
         tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
         backuppath = None
         try:
             ui.write(_('creating temporary repository to stage migrated '
                        'data: %s\n') % tmppath)
             dstrepo = localrepo.localrepository(repo.baseui,
                                                 path=tmppath,
                                                 create=True)
-            with dstrepo.wlock():
-                with dstrepo.lock():
+            with dstrepo.wlock(), dstrepo.lock():
                 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                           upgradeactions)
         finally:
             ui.write(_('removing temporary repository %s\n') % tmppath)
             repo.vfs.rmtree(tmppath, forcibly=True)
             if backuppath:
                 ui.warn(_('copy of old repository backed up at %s\n') %
                         backuppath)
                 ui.warn(_('the old repository will not be deleted; remove '
                           'it to free up disk space once the upgraded '
                           'repository is verified\n'))