Thanks to Kyle for noticing this and for providing the regular
expression to run on the codebase.
This patch has been reviewed by the test suite and they approved of
it.
- skip-blame: fallout from mass reformatting
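For illustration, a minimal sketch of the kind of pattern-and-replace pass the message describes; the regex below is a hypothetical approximation written for this summary, not the one Kyle actually provided:

    import re

    # Match two adjacent single-quoted bytes literals separated only by
    # spaces/tabs on the same line, e.g. b'full ' b'node'.
    ADJACENT = re.compile(r"b'([^'\\\n]*(?:\\.[^'\\\n]*)*)'[ \t]+b'")

    def merge_adjacent(source):
        # Apply repeatedly so chains of three or more literals collapse too.
        prev = None
        while prev != source:
            prev = source
            source = ADJACENT.sub(r"b'\1", source)
        return source

    line = "raise error.Abort(b'manifest revision must be integer or full ' b'node')"
    print(merge_adjacent(line))
    # raise error.Abort(b'manifest revision must be integer or full node')

A same-line-only pattern like this leaves the deliberate multi-line concatenations in the hunks below untouched; a real pass would also need to handle double-quoted literals and avoid matching inside comments.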
Reviewers: indygreg, durin42, spectral, pulkit, hg-reviewers
Automatic diff as part of commit; lint not applicable.
Automatic diff as part of commit; unit tests not applicable.
Some slightly disappointing loss of clarity where we had originally had something like:

    foo(b'abc\n'
        b'def')

and this became foo(b'abc\n' b'def'), which is now foo(b'abc\ndef'): I liked the newline ending the string and us being able to see an actual newline in the source, but that's already been lost, and this isn't making it worse in any way.
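For concreteness, a small runnable sketch of the three stages being described (foo is a stand-in name, not a function from this patch):

    def foo(data):
        assert data == b'abc\ndef'

    # Originally, the line break after \n mirrored the newline in the data:
    foo(b'abc\n'
        b'def')

    # The mass reformat joined the two literals onto one line:
    foo(b'abc\n' b'def')

    # This patch merges them into a single literal:
    foo(b'abc\ndef')

All three calls pass identical bytes; implicit concatenation also requires matching prefixes, so a stray mix like b'abc\n' 'def' would be a SyntaxError on Python 3.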
nit: add a # skip-blame: annotation to help us in the future (maybe check history to look for how it's been annotated in the past)
In D7028#103123, @durin42 wrote:

    nit: add a # skip-blame: annotation to help us in the future (maybe check history to look for how it's been annotated in the past)

Done.
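(The annotation matters because blame tools can then exclude the commit; for example, hg annotate has an experimental --skip option that takes a revset, so a hypothetical invocation along the lines of

    hg annotate --skip 'desc("skip-blame")' <file>

would hide mass-reformatting commits carrying the marker.)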
Status | Author | Revision
---|---|---
Closed | martinvonz |
Closed | martinvonz |
    rev = int(rev)
    if util.safehasattr(repo.manifestlog, b'getstorage'):
        t = repo.manifestlog.getstorage(b'').node(rev)
    else:
        t = repo.manifestlog._revlog.lookup(rev)
except ValueError:
    raise error.Abort(
-        b'manifest revision must be integer or full ' b'node'
+        b'manifest revision must be integer or full node'
    )

def d():
    repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
    repo.manifestlog[t].read()

timer(d)
fm.end()
allow = buildmatch(ui, repo, user, b'acl.allow')
deny = buildmatch(ui, repo, user, b'acl.deny')

for rev in pycompat.xrange(repo[node].rev(), len(repo)):
    ctx = repo[rev]
    branch = ctx.branch()
    if denybranches and denybranches(branch):
        raise error.Abort(
-            _(b'acl: user "%s" denied on branch "%s"' b' (changeset "%s")')
+            _(b'acl: user "%s" denied on branch "%s" (changeset "%s")')
            % (user, branch, ctx)
        )
    if allowbranches and not allowbranches(branch):
        raise error.Abort(
            _(
                b'acl: user "%s" not allowed on branch "%s"'
                b' (changeset "%s")'
            )
            % (user, branch, ctx)
        )
    ui.debug(
        b'acl: branch access granted: "%s" on branch "%s"\n' % (ctx, branch)
    )

    for f in ctx.files():
        if deny and deny(f):
            raise error.Abort(
-                _(b'acl: user "%s" denied on "%s"' b' (changeset "%s")')
+                _(b'acl: user "%s" denied on "%s" (changeset "%s")')
                % (user, f, ctx)
            )
        if allow and not allow(f):
            raise error.Abort(
                _(
                    b'acl: user "%s" not allowed on "%s"'
                    b' (changeset "%s")'
                )
                % (user, f, ctx)
            )
    ui.debug(b'acl: path access granted: "%s"\n' % ctx)
self.encoding = b'utf-8'

def checkhexformat(self, revstr, mapname=b'splicemap'):
    """ fails if revstr is not a 40 byte hex. mercurial and git both uses
    such format for their revision numbering
    """
    if not re.match(br'[0-9a-fA-F]{40,40}$', revstr):
        raise error.Abort(
-            _(b'%s entry %s is not a valid revision' b' identifier')
+            _(b'%s entry %s is not a valid revision identifier')
            % (mapname, revstr)
        )

def before(self):
    pass

def after(self):
    pass
log = []  # list of logentry objects containing the CVS state

# patterns to match in CVS (r)log output, by state of use
re_00 = re.compile(b'RCS file: (.+)$')
re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
re_03 = re.compile(
-    b"(Cannot access.+CVSROOT)|" b"(can't create temporary directory.+)$"
+    b"(Cannot access.+CVSROOT)|(can't create temporary directory.+)$"
)
re_10 = re.compile(b'Working file: (.+)$')
re_20 = re.compile(b'symbolic names:')
re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
re_31 = re.compile(b'----------------------------$')
re_32 = re.compile(
    b'======================================='
    b'======================================$'
        state = 0
elif state == 4:
    # expecting '------' separator before first revision
    if re_31.match(line):
        state = 5
    else:
        assert not re_32.match(line), _(
-            b'must have at least ' b'some revisions'
+            b'must have at least some revisions'
        )
elif state == 5:
    # expecting revision number and possibly (ignored) lock indication
    # we create the logentry here from values stored in states 0 to 4,
    # as this state is re-entered for subsequent revisions of a file.
    match = re_50.match(line)
    assert match, _(b'expected revision number')
            )
            break
        except UnicodeDecodeError:
            pass  # try next encoding
        except LookupError as inst:  # unknown encoding, maybe
            raise error.Abort(
                inst,
                hint=_(
-                    b'check convert.cvsps.logencoding' b' configuration'
+                    b'check convert.cvsps.logencoding configuration'
                ),
            )
    else:
        raise error.Abort(
            _(
                b"no encoding can transcode"
                b" CVS log message for %s of %s"
            )
            % (revstr(entry.revision), entry.file),
-            hint=_(
-                b'check convert.cvsps.logencoding' b' configuration'
-            ),
+            hint=_(b'check convert.cvsps.logencoding configuration'),
        )

hook.hook(ui, None, b"cvslog", True, log=log)
return log

class changeset(object):
def retrievegitmodules(self, version):
    modules, ret = self.gitrun(
        b'show', b'%s:%s' % (version, b'.gitmodules')
    )
    if ret:
        # This can happen if a file is in the repo that has permissions
        # 160000, but there is no .gitmodules file.
        self.ui.warn(
-            _(b"warning: cannot read submodules config file in " b"%s\n")
+            _(b"warning: cannot read submodules config file in %s\n")
            % version
        )
        return

    try:
        self.parsegitmodules(modules)
    except error.ParseError:
        self.ui.warn(
if not self.revs:
    return self.mtnrun(b"leaves").splitlines()
else:
    return self.revs

def getchanges(self, rev, full):
    if full:
        raise error.Abort(
-            _(b"convert from monotone does not support " b"--full")
+            _(b"convert from monotone does not support --full")
        )
    revision = self.mtnrun(b"get_revision", rev).split(b"\n\n")
    files = {}
    ignoremove = {}
    renameddirs = []
    copies = {}
    for e in revision:
        m = self.add_file_re.match(e)

def before(self):
    # Check if we have a new enough version to use automate stdio
    try:
        versionstr = self.mtnrunsingle(b"interface_version")
        version = float(versionstr)
    except Exception:
        raise error.Abort(
-            _(b"unable to determine mtn automate interface " b"version")
+            _(b"unable to determine mtn automate interface version")
        )
    if version >= 12.0:
        self.automatestdio = True
        self.ui.debug(
            b"mtn automate version %f - using automate stdio\n" % version
        )
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    if svn is None:
        raise error.Abort(
-            _(b'debugsvnlog could not load Subversion python ' b'bindings')
+            _(b'debugsvnlog could not load Subversion python bindings')
        )

    args = decodeargs(ui.fin.read())
    get_log_child(ui.fout, *args)

class logstream(object):
    """Interruptible revision log iterator."""

""" fails if revision format does not match the correct format"""
if not re.match(
    r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
    r'[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
    r'{12,12}(.*)\@[0-9]+$',
    revstr,
):
    raise error.Abort(
-        _(b'%s entry %s is not a valid revision' b' identifier')
+        _(b'%s entry %s is not a valid revision identifier')
        % (mapname, revstr)
    )

def numcommits(self):
    return int(self.head.rsplit(b'@', 1)[1]) - self.startrev

def gettags(self):
    tags = {}
data = fctx.data()
if stringutil.binary(data):
    # We should not abort here, since the user should
    # be able to say "** = native" to automatically
    # have all non-binary files taken care of.
    continue
if inconsistenteol(data):
    raise errormod.Abort(
-        _(b"inconsistent newline style " b"in %s\n") % f
+        _(b"inconsistent newline style in %s\n") % f
    )
return super(eolrepo, self).commitctx(ctx, error, origctx)

repo.__class__ = eolrepo
repo._hgcleardirstate()
    (b'f', b'file', None, _(b'list the filename')),
    (b'd', b'date', None, _(b'list the date (short with -q)')),
    (b'n', b'number', None, _(b'list the revision number (default)')),
    (b'c', b'changeset', None, _(b'list the changeset')),
    (
        b'l',
        b'line-number',
        None,
-        _(b'show line number at the first ' b'appearance'),
+        _(b'show line number at the first appearance'),
    ),
    (
        b'e',
        b'deleted',
        None,
        _(b'show deleted lines (slow) (EXPERIMENTAL)'),
    ),
    (
        b'long-hash',
        None,
        _(b'show long changeset hash (EXPERIMENTAL)'),
    ),
    (
        b'',
        b'rebuild',
        None,
-        _(b'rebuild cache even if it exists ' b'(EXPERIMENTAL)'),
+        _(b'rebuild cache even if it exists (EXPERIMENTAL)'),
    ),
    ]
    + commands.diffwsopts
    + commands.walkopts
    + commands.formatteropts,
    r'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
    r'inferrepo': True,
}
    # skiprevs is not supported yet
    return orig(
        self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
    )
try:
    return _doannotate(self, follow, diffopts)
except Exception as ex:
    self._repo.ui.debug(
-        b'fastannotate: falling back to the vanilla ' b'annotate: %r\n' % ex
+        b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
    )
    return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)

def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
    # skipset: a set-like used to test if a fctx needs to be downloaded
    with context.fctxannotatecontext(self, follow, diffopts) as ac:
        skipset = revmap.revmap(ac.revmappath)
        b'Automated merge with %s' % util.removeauth(other.url())
    )
    editopt = opts.get(b'edit') or opts.get(b'force_editor')
    editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
    n = repo.commit(
        message, opts[b'user'], opts[b'date'], editor=editor
    )
    ui.status(
-        _(b'new changeset %d:%s merges remote changes ' b'with local\n')
+        _(b'new changeset %d:%s merges remote changes with local\n')
        % (repo.changelog.rev(n), short(n))
    )
    return err
finally:
    release(lock, wlock)
return revs

def checknodescendants(repo, revs):
    if not obsolete.isenabled(repo, obsolete.allowunstableopt) and repo.revs(
        b'(%ld::) - (%ld)', revs, revs
    ):
        raise error.Abort(
-            _(b'can only fix a changeset together ' b'with all its descendants')
+            _(b'can only fix a changeset together with all its descendants')
        )

def checkfixablectx(ui, repo, ctx):
    """Aborts if the revision shouldn't be replaced with a fixed one."""
    if not ctx.mutable():
        raise error.Abort(
            b'can\'t fix immutable changeset %s'
def githelp(ui, repo, *args, **kwargs):
    '''suggests the Mercurial equivalent of the given git command

    Usage: hg githelp -- <git command>
    '''
    if len(args) == 0 or (len(args) == 1 and args[0] == b'git'):
        raise error.Abort(
-            _(b'missing git command - ' b'usage: hg githelp -- <git command>')
+            _(b'missing git command - usage: hg githelp -- <git command>')
        )
    if args[0] == b'git':
        args = args[1:]
    cmd = args[0]
    if not cmd in gitcommands:
        raise error.Abort(_(b"error: unknown git command %s") % cmd)
    fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig")
    fp = os.fdopen(fd, r'wb')
    fp.write(sig)
    fp.close()
    fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt")
    fp = os.fdopen(fd, r'wb')
    fp.write(data)
    fp.close()
-    gpgcmd = b"%s --logger-fd 1 --status-fd 1 --verify " b"\"%s\" \"%s\"" % (
+    gpgcmd = b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" % (
        self.path,
        sigfile,
        datafile,
    )
    ret = procutil.filter(b"", gpgcmd)
finally:
    for f in (sigfile, datafile):
        try:

if key[0] == b"ERRSIG":
    ui.write(_(b"%s Unknown key ID \"%s\"\n") % (prefix, key[1]))
    continue
if key[0] == b"BADSIG":
    ui.write(_(b"%s Bad signature from \"%s\"\n") % (prefix, key[2]))
    continue
if key[0] == b"EXPSIG":
    ui.write(
-        _(b"%s Note: Signature has expired" b" (signed by: \"%s\")\n")
+        _(b"%s Note: Signature has expired (signed by: \"%s\")\n")
        % (prefix, key[2])
    )
elif key[0] == b"EXPKEYSIG":
    ui.write(
-        _(b"%s Note: This key has expired" b" (signed by: \"%s\")\n")
+        _(b"%s Note: This key has expired (signed by: \"%s\")\n")
        % (prefix, key[2])
    )
validkeys.append((key[1], key[2], key[3]))
return validkeys

@command(b"sigs", [], _(b'hg sigs'), helpcategory=_HELP_CATEGORY)
def sigs(ui, repo):

if revs:
    nodes = [repo.lookup(n) for n in revs]
else:
    nodes = [
        node for node in repo.dirstate.parents() if node != hgnode.nullid
    ]
if len(nodes) > 1:
    raise error.Abort(
-        _(b'uncommitted merge - please provide a ' b'specific revision')
+        _(b'uncommitted merge - please provide a specific revision')
    )
if not nodes:
    nodes = [repo.changelog.tip()]
for n in nodes:
    hexnode = hgnode.hex(n)
    ui.write(
        _(b"signing %d:%s\n") % (repo.changelog.rev(n), hgnode.short(n))
highlight = pygments.highlight
ClassNotFound = pygments.util.ClassNotFound
guess_lexer = pygments.lexers.guess_lexer
guess_lexer_for_filename = pygments.lexers.guess_lexer_for_filename
TextLexer = pygments.lexers.TextLexer
HtmlFormatter = pygments.formatters.HtmlFormatter

SYNTAX_CSS = (
-    b'\n<link rel="stylesheet" href="{url}highlightcss" ' b'type="text/css" />'
+    b'\n<link rel="stylesheet" href="{url}highlightcss" type="text/css" />'
)

def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
    # append a <link ...> to the syntax highlighting css
    tmpl.load(b'header')
    old_header = tmpl.cache[b'header']
    if any((outg, abort, revs, freeargs, rules, editplan)):
        raise error.Abort(_(b'no arguments allowed with --continue'))
elif goal == b'abort':
    if any((outg, revs, freeargs, rules, editplan)):
        raise error.Abort(_(b'no arguments allowed with --abort'))
elif goal == b'edit-plan':
    if any((outg, revs, freeargs)):
        raise error.Abort(
-            _(b'only --commands argument allowed with ' b'--edit-plan')
+            _(b'only --commands argument allowed with --edit-plan')
        )
else:
    if state.inprogress():
        raise error.Abort(
            _(
                b'history edit already in progress, try '
                b'--continue or --abort'
            )
self._waittimeout = waittimeout
self._locktimeout = locktimeout

def sqlconnect(self):
    if self.sqlconn:
        raise indexapi.indexexception(b"SQL connection already open")
    if self.sqlcursor:
        raise indexapi.indexexception(
-            b"SQL cursor already open without" b" connection"
+            b"SQL cursor already open without connection"
        )
    retry = 3
    while True:
        try:
            self.sqlconn = mysql.connector.connect(**self.sqlargs)

            # Code is copy-pasted from hgsql. Bug fixes need to be
            # back-ported!

else:
    self.sqlconn.rollback()

def addbundle(self, bundleid, nodesctx):
    if not self._connected:
        self.sqlconnect()
    self.log.info(b"ADD BUNDLE %r %r" % (self.reponame, bundleid))
    self.sqlcursor.execute(
-        b"INSERT INTO bundles(bundle, reponame) VALUES " b"(%s, %s)",
+        b"INSERT INTO bundles(bundle, reponame) VALUES (%s, %s)",
        params=(bundleid, self.reponame),
    )
    for ctx in nodesctx:
        self.sqlcursor.execute(
            b"INSERT INTO nodestobundle(node, bundle, reponame) "
            b"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
            b"bundle=VALUES(bundle)",
            params=(ctx.hex(), bundleid, self.reponame),
if self.gitmode == b'auto':
    diffopts.upgrade = True
elif self.gitmode == b'keep':
    pass
elif self.gitmode in (b'yes', b'no'):
    diffopts.git = self.gitmode == b'yes'
else:
    raise error.Abort(
-        _(b'mq.git option can be auto/keep/yes/no' b' got %s')
+        _(b'mq.git option can be auto/keep/yes/no got %s')
        % self.gitmode
    )
if patchfn:
    diffopts = self.patchopts(diffopts, patchfn)
return diffopts

def patchopts(self, diffopts, *patches):
    """Return a copy of input diff options with git set to true if

if n is None:
    raise error.Abort(_(b"repository commit failed"))

if update_status:
    self.applied.append(statusentry(n, patchname))

if patcherr:
    self.ui.warn(
-        _(b"patch failed, rejects left in working " b"directory\n")
+        _(b"patch failed, rejects left in working directory\n")
    )
    err = 2
    break

if fuzz and strict:
    self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
    err = 3
    break
tphase = phases.newcommitphase(repo.ui)
if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
    with repo.transaction(b'qfinish') as tr:
        phases.advanceboundary(repo, tr, tphase, qfinished)

def delete(self, repo, patches, opts):
    if not patches and not opts.get(b'rev'):
        raise error.Abort(
-            _(b'qdelete requires at least one revision or ' b'patch name')
+            _(b'qdelete requires at least one revision or patch name')
        )
    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise error.Abort(_(b"cannot delete applied patch %s") % patch)

def checkreservedname(self, name):
    if name in self._reserved:
        raise error.Abort(
            _(b'"%s" cannot be used as the name of a patch') % name
        )
    if name != name.strip():
        # whitespace is stripped by parseseries()
        raise error.Abort(
-            _(b'patch name cannot begin or end with ' b'whitespace')
+            _(b'patch name cannot begin or end with whitespace')
        )
    for prefix in (b'.hg', b'.mq'):
        if name.startswith(prefix):
            raise error.Abort(
                _(b'patch name cannot begin with "%s"') % prefix
            )
    for c in (b'#', b':', b'\r', b'\n'):
        if c in name:
if exact:
    if keepchanges:
        raise error.Abort(
            _(b"cannot use --exact and --keep-changes together")
        )
    if move:
        raise error.Abort(
-            _(b'cannot use --exact and --move ' b'together')
+            _(b'cannot use --exact and --move together')
        )
    if self.applied:
        raise error.Abort(
-            _(b'cannot push --exact with applied ' b'patches')
+            _(b'cannot push --exact with applied patches')
        )
    root = self.series[start]
    target = patchheader(self.join(root), self.plainmode).parent
    if not target:
        raise error.Abort(
            _(b"%s does not have a parent recorded") % root
        )
    if not repo[target] == repo[b'.']:

if patchname in self.series:
    raise error.Abort(
        _(b'patch %s is already in the series file') % patchname
    )

if rev:
    if files:
        raise error.Abort(
-            _(b'option "-r" not valid when importing ' b'files')
+            _(b'option "-r" not valid when importing files')
        )
    rev = scmutil.revrange(repo, rev)
    rev.sort(reverse=True)
elif not files:
    raise error.Abort(_(b'no files or revisions specified'))
if (len(files) > 1 or len(rev) > 1) and patchname:
    raise error.Abort(
-        _(b'option "-n" not valid when importing multiple ' b'patches')
+        _(b'option "-n" not valid when importing multiple patches')
    )
imported = []
if rev:
    # If mq patches are applied, we can only import revisions
    # that form a linear path to qbase.
    # Otherwise, they should form a linear path to a head.
    heads = repo.changelog.heads(repo.changelog.node(rev.first()))
    if len(heads) > 1:
        raise error.Abort(
-            _(b'revision %d is the root of more than one ' b'branch')
+            _(b'revision %d is the root of more than one branch')
            % rev.last()
        )
    if self.applied:
        base = repo.changelog.node(rev.first())
        if base in [n.node for n in self.applied]:
            raise error.Abort(
                _(b'revision %d is already managed') % rev.first()
            )
        if heads != [self.applied[-1].node]:
            raise error.Abort(
-                _(b'revision %d is not the parent of ' b'the queue')
+                _(b'revision %d is not the parent of the queue')
                % rev.first()
            )
        base = repo.changelog.rev(self.applied[0].node)
        lastparent = repo.changelog.parentrevs(base)[0]
    else:
        if heads != [repo.changelog.node(rev.first())]:
            raise error.Abort(
                _(b'revision %d has unmanaged children') % rev.first()
p1, p2 = repo.changelog.parentrevs(r)
n = repo.changelog.node(r)
if p2 != nullrev:
    raise error.Abort(
        _(b'cannot import merge revision %d') % r
    )
if lastparent and lastparent != r:
    raise error.Abort(
-        _(b'revision %d is not the parent of ' b'%d')
+        _(b'revision %d is not the parent of %d')
        % (r, lastparent)
    )
lastparent = p1

if not patchname:
    patchname = self.makepatchname(
        repo[r].description().split(b'\n', 1)[0],
        b'%d.diff' % r,

if opts.get(b'patches'):
    patchespath = ui.expandpath(opts.get(b'patches'))
else:
    patchespath = patchdir(sr)
try:
    hg.peer(ui, opts, patchespath)
except error.RepoError:
    raise error.Abort(
-        _(b'versioned patch repository not found' b' (see init --mq)')
+        _(b'versioned patch repository not found (see init --mq)')
    )
qbase, destrev = None, None
if sr.local():
    repo = sr.local()
    if repo.mq.applied and repo[qbase].phase() != phases.secret:
        qbase = repo.mq.applied[0].node
        if not hg.islocal(dest):
            heads = set(repo.heads())
q = repo.mq
applied = set(p.name for p in q.applied)
patch = None
args = list(args)
if opts.get(r'list'):
    if args or opts.get(r'none'):
        raise error.Abort(
-            _(b'cannot mix -l/--list with options or ' b'arguments')
+            _(b'cannot mix -l/--list with options or arguments')
        )
    for i in pycompat.xrange(len(q.series)):
        status(i)
    return
if not args or args[0][0:1] in b'-+':
    if not q.applied:
        raise error.Abort(_(b'no patches applied'))
    patch = q.applied[-1].name

q.savedirty()  # save to .hg/patches before copying
if opts.get(b'copy'):
    path = q.path
    if opts.get(b'name'):
        newpath = os.path.join(q.basepath, opts.get(b'name'))
        if os.path.exists(newpath):
            if not os.path.isdir(newpath):
                raise error.Abort(
-                    _(b'destination %s exists and is not ' b'a directory')
+                    _(b'destination %s exists and is not a directory')
                    % newpath
                )
            if not opts.get(b'force'):
                raise error.Abort(
-                    _(b'destination %s exists, ' b'use -f to force')
-                    % newpath
+                    _(b'destination %s exists, use -f to force') % newpath
                )
    else:
        newpath = savename(path)
    ui.warn(_(b"copy %s to %s\n") % (path, newpath))
    util.copyfiles(path, newpath)
if opts.get(b'empty'):
    del q.applied[:]
    q.applieddirty = True
mqtags.append((mqtags[-1][0], b'qtip'))
mqtags.append((mqtags[0][0], b'qbase'))
mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
tags = result[0]
for patch in mqtags:
    if patch[1] in tags:
        self.ui.warn(
-            _(b'tag %s overrides mq patch of the same ' b'name\n')
+            _(b'tag %s overrides mq patch of the same name\n')
            % patch[1]
        )
    else:
        tags[patch[1]] = patch[0]
return result

if repo.local():
    if not mq:
        return orig(ui, *args, **kwargs)
    if args:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise error.Abort(
-                _(b'only a local queue repository ' b'may be initialized')
+                _(b'only a local queue repository may be initialized')
            )
    else:
        repopath = cmdutil.findrepo(encoding.getcwd())
        if not repopath:
            raise error.Abort(
-                _(b'there is no Mercurial repository here ' b'(.hg not found)')
+                _(b'there is no Mercurial repository here (.hg not found)')
            )
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)

def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""
ui.status(b'%s\n' % node.short(n))
ui.status(
    _(b'...and %d more, use --verbose to list all\n')
    % (len(visibletostrip) - maxnodes)
)
if not force:
    raise error.Abort(
        _(b'local changes found'),
-        hint=_(b'use --force-delete-local-changes to ' b'ignore'),
+        hint=_(b'use --force-delete-local-changes to ignore'),
    )
with ui.uninterruptible():
    if revstostrip:
        tostrip = [unfi.changelog.node(r) for r in revstostrip]
        if repo[b'.'].node() in tostrip:
            # stripping working copy, so move to a different commit first
            urev = max(
    subs.add(sub)
    continue
revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
if len(revs):
    subs.add(sub)
    continue
if len(subs) == 0:
    self.ui.debug(
-        b'notify: no subscribers to selected repo ' b'and revset\n'
+        b'notify: no subscribers to selected repo and revset\n'
    )
    return

p = emailparser.Parser()
try:
    msg = p.parsestr(encoding.strfromlocal(data))
except emailerrors.MessageParseError as inst:
    raise error.Abort(inst)
"""Add a header pointing to a public URL where the changeset is available | """Add a header pointing to a public URL where the changeset is available | ||||
""" | """ | ||||
repo = ctx.repo() | repo = ctx.repo() | ||||
# experimental config: patchbomb.publicurl | # experimental config: patchbomb.publicurl | ||||
# waiting for some logic that check that the changeset are available on the | # waiting for some logic that check that the changeset are available on the | ||||
# destination before patchbombing anything. | # destination before patchbombing anything. | ||||
publicurl = repo.ui.config(b'patchbomb', b'publicurl') | publicurl = repo.ui.config(b'patchbomb', b'publicurl') | ||||
if publicurl: | if publicurl: | ||||
return b'Available At %s\n' b'# hg pull %s -r %s' % ( | return b'Available At %s\n# hg pull %s -r %s' % ( | ||||
publicurl, | publicurl, | ||||
publicurl, | publicurl, | ||||
ctx, | ctx, | ||||
) | ) | ||||
return None | return None | ||||
def uisetup(ui): | def uisetup(ui): | ||||
"""return a list of patches for a list of revisions | """return a list of patches for a list of revisions | ||||
Each patch in the list is itself a list of lines. | Each patch in the list is itself a list of lines. | ||||
""" | """ | ||||
ui = repo.ui | ui = repo.ui | ||||
prev = repo[b'.'].rev() | prev = repo[b'.'].rev() | ||||
for r in revs: | for r in revs: | ||||
if r == prev and (repo[None].files() or repo[None].deleted()): | if r == prev and (repo[None].files() or repo[None].deleted()): | ||||
ui.warn( | ui.warn(_(b'warning: working directory has uncommitted changes\n')) | ||||
_(b'warning: working directory has ' b'uncommitted changes\n') | |||||
) | |||||
output = stringio() | output = stringio() | ||||
cmdutil.exportfile( | cmdutil.exportfile( | ||||
repo, [r], output, opts=patch.difffeatureopts(ui, opts, git=True) | repo, [r], output, opts=patch.difffeatureopts(ui, opts, git=True) | ||||
) | ) | ||||
yield output.getvalue().split(b'\n') | yield output.getvalue().split(b'\n') | ||||
def _getbundle(repo, dest, **opts): | def _getbundle(repo, dest, **opts): | ||||
The body can be obtained either from the command line option or entered by | The body can be obtained either from the command line option or entered by | ||||
the user through the editor. | the user through the editor. | ||||
""" | """ | ||||
ui = repo.ui | ui = repo.ui | ||||
if opts.get(r'desc'): | if opts.get(r'desc'): | ||||
body = open(opts.get(r'desc')).read() | body = open(opts.get(r'desc')).read() | ||||
else: | else: | ||||
ui.write( | ui.write( | ||||
_(b'\nWrite the introductory message for the ' b'patch series.\n\n') | _(b'\nWrite the introductory message for the patch series.\n\n') | ||||
) | ) | ||||
body = ui.edit( | body = ui.edit( | ||||
defaultbody, sender, repopath=repo.path, action=b'patchbombbody' | defaultbody, sender, repopath=repo.path, action=b'patchbombbody' | ||||
) | ) | ||||
# Save series description in case sendmail fails | # Save series description in case sendmail fails | ||||
msgfile = repo.vfs(b'last-email.txt', b'wb') | msgfile = repo.vfs(b'last-email.txt', b'wb') | ||||
msgfile.write(body) | msgfile.write(body) | ||||
msgfile.close() | msgfile.close() | ||||
for addr in showaddrs: | for addr in showaddrs: | ||||
ui.write(b'%s\n' % addr, label=b'patchbomb.to') | ui.write(b'%s\n' % addr, label=b'patchbomb.to') | ||||
for m, subj, ds in msgs: | for m, subj, ds in msgs: | ||||
ui.write((b'Subject: %s\n' % subj), label=b'patchbomb.subject') | ui.write((b'Subject: %s\n' % subj), label=b'patchbomb.subject') | ||||
if ds: | if ds: | ||||
ui.write(ds, label=b'patchbomb.diffstats') | ui.write(ds, label=b'patchbomb.diffstats') | ||||
ui.write(b'\n') | ui.write(b'\n') | ||||
if ui.promptchoice( | if ui.promptchoice( | ||||
_(b'are you sure you want to send (yn)?' b'$$ &Yes $$ &No') | _(b'are you sure you want to send (yn)?$$ &Yes $$ &No') | ||||
): | ): | ||||
raise error.Abort(_(b'patchbomb canceled')) | raise error.Abort(_(b'patchbomb canceled')) | ||||
ui.write(b'\n') | ui.write(b'\n') | ||||
parent = opts.get(b'in_reply_to') or None | parent = opts.get(b'in_reply_to') or None | ||||
# angle brackets may be omitted, they're not semantically part of the msg-id | # angle brackets may be omitted, they're not semantically part of the msg-id | ||||
if parent is not None: | if parent is not None: |
    % (
        drevdesc,
        ui.label(bytes(ctx), b'phabricator.node'),
        ui.label(desc, b'phabricator.desc'),
    )
)
if ui.promptchoice(
-    _(b'Send the above changes to %s (yn)?' b'$$ &Yes $$ &No') % url
+    _(b'Send the above changes to %s (yn)?$$ &Yes $$ &No') % url
):
    return False
return True

_knownstatusnames = {
    b'accepted',
# desired
self.extrafns.insert(0, _savebranch)
if self.collapsef:
    branches = set()
    for rev in self.state:
        branches.add(repo[rev].branch())
        if len(branches) > 1:
            raise error.Abort(
-                _(b'cannot collapse multiple named ' b'branches')
+                _(b'cannot collapse multiple named branches')
            )

# Calculate self.obsoletenotrebased
obsrevs = _filterobsoleterevs(self.repo, self.state)
self._handleskippingobsolete(obsrevs, self.destmap)

# Keep track of the active bookmarks in order to reset them later
self.activebookmark = self.activebookmark or repo._activebookmark

        )
        % desc
    )
    repo.ui.status(msg)
    self.skipped.add(rev)
elif rev in self.obsoletenotrebased:
    succ = self.obsoletenotrebased[rev]
    if succ is None:
-        msg = (
-            _(b'note: not rebasing %s, it has no ' b'successor\n')
-            % desc
-        )
+        msg = _(b'note: not rebasing %s, it has no successor\n') % desc
    else:
        succdesc = _ctxdesc(repo[succ])
        msg = _(
-            b'note: not rebasing %s, already in ' b'destination as %s\n'
+            b'note: not rebasing %s, already in destination as %s\n'
        ) % (desc, succdesc)
    repo.ui.status(msg)
    # Make clearrebased aware state[rev] is not a true successor
    self.skipped.add(rev)
# Record rev as moved to its desired destination in self.state.
# This helps bookmark and working parent movement.
dest = max(
    adjustdest(repo, rev, self.destmap, self.state, self.skipped)
def _dryrunrebase(ui, repo, action, opts):
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
-            _(b'starting dry-run rebase; repository will not be ' b'changed\n')
+            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
-                if not ui.promptchoice(
-                    _(b'apply changes (yn)?' b'$$ &Yes $$ &No')
-                ):
+                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True, backup=False, suppwarns=True
                    )
                needsabort = False
            else:
destset = repo.anyrevs([destf], user=True, localalias=alias)
size = len(destset)
if size == 1:
    destmap[r] = destset.first()
elif size == 0:
    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
else:
    raise error.Abort(
-        _(b'rebase destination for %s is not ' b'unique')
-        % repo[r]
+        _(b'rebase destination for %s is not unique') % repo[r]
    )

if dest is not None:
    # single-dest case: assign dest to each rev in rebaseset
    destrev = dest.rev()
    destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

if not destmap:

successors in destination or no non-obsolete successor.
"""
# Obsolete node with successors not in dest leads to divergence
divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

if divergencebasecandidates and not divergenceok:
    divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
-    msg = _(b"this rebase will cause " b"divergences from: %s")
+    msg = _(b"this rebase will cause divergences from: %s")
    h = _(
        b"to force the rebase please set "
        b"experimental.evolution.allowdivergence=True"
    )
    raise error.Abort(msg % (b",".join(divhashes),), hint=h)

def successorrevs(unfi, rev):
for rev in revs:
    ctx = repo[rev]
    admonition = re.search(RE_DIRECTIVE, ctx.description())
    if admonition:
        if admonition.group(1) in directives:
            continue
        else:
            ui.write(
-                _(b"Invalid admonition '%s' present in changeset %s" b"\n")
+                _(b"Invalid admonition '%s' present in changeset %s\n")
                % (admonition.group(1), ctx.hex()[:12])
            )
            sim = lambda x: difflib.SequenceMatcher(
                None, admonition.group(1), x
            ).ratio()
            similar = [s for s in directives if sim(s) > 0.6]
            if len(similar) == 1:

if pblock[b'indent'] > 0:
    paragraphs.append(pblock[b'lines'])
else:
    break

# TODO consider using title as paragraph for more concise notes.
if not paragraphs:
    repo.ui.warn(
-        _(b"error parsing releasenotes for revision: " b"'%s'\n")
+        _(b"error parsing releasenotes for revision: '%s'\n")
        % node.hex(ctx.node())
    )
if title:
    notes.addtitleditem(directive, title, paragraphs)
else:
    notes.addnontitleditem(directive, paragraphs)

return notes

if block[b'type'] in (b'bullet', b'section'):
    break
if block[b'type'] == b'paragraph':
    lines.append(block[b'lines'])
    notefragment.append(lines)
    continue
elif block[b'type'] != b'paragraph':
    raise error.Abort(
-        _(b'unexpected block type in release notes: ' b'%s')
+        _(b'unexpected block type in release notes: %s')
        % block[b'type']
    )
if title:
    notefragment.append(block[b'lines'])

return notefragment

currentsection = None
hg.wirepeersetupfuncs.append(fileserverclient.peersetup) | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) | ||||
entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) | entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) | ||||
entry[1].append( | entry[1].append( | ||||
( | ( | ||||
b'', | b'', | ||||
b'shallow', | b'shallow', | ||||
None, | None, | ||||
_(b"create a shallow clone which uses remote file " b"history"), | _(b"create a shallow clone which uses remote file history"), | ||||
) | ) | ||||
) | ) | ||||
extensions.wrapcommand( | extensions.wrapcommand( | ||||
commands.table, b'debugindex', debugcommands.debugindex | commands.table, b'debugindex', debugcommands.debugindex | ||||
) | ) | ||||
extensions.wrapcommand( | extensions.wrapcommand( | ||||
commands.table, b'debugindexdot', debugcommands.debugindexdot | commands.table, b'debugindexdot', debugcommands.debugindexdot | ||||
wanted = set() | wanted = set() | ||||
minrev, maxrev = min(revs), max(revs) | minrev, maxrev = min(revs), max(revs) | ||||
pctx = repo[b'.'] | pctx = repo[b'.'] | ||||
for filename in match.files(): | for filename in match.files(): | ||||
if filename not in pctx: | if filename not in pctx: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot follow file not in parent ' b'revision: "%s"') | _(b'cannot follow file not in parent revision: "%s"') % filename | ||||
% filename | |||||
) | ) | ||||
fctx = pctx[filename] | fctx = pctx[filename] | ||||
linkrev = fctx.linkrev() | linkrev = fctx.linkrev() | ||||
if linkrev >= minrev and linkrev <= maxrev: | if linkrev >= minrev and linkrev <= maxrev: | ||||
fncache.setdefault(linkrev, []).append(filename) | fncache.setdefault(linkrev, []).append(filename) | ||||
wanted.add(linkrev) | wanted.add(linkrev) | ||||
return store.getdeltachain(name, node) | return store.getdeltachain(name, node) | ||||
except KeyError: | except KeyError: | ||||
pass | pass | ||||
raise KeyError((name, hex(node))) | raise KeyError((name, hex(node))) | ||||
def add(self, name, node, data): | def add(self, name, node, data): | ||||
raise RuntimeError( | raise RuntimeError( | ||||
b"cannot add content only to remotefilelog " b"contentstore" | b"cannot add content only to remotefilelog contentstore" | ||||
) | ) | ||||
def getmissing(self, keys): | def getmissing(self, keys): | ||||
missing = keys | missing = keys | ||||
for store in self.stores: | for store in self.stores: | ||||
if missing: | if missing: | ||||
missing = store.getmissing(missing) | missing = store.getmissing(missing) | ||||
return missing | return missing | ||||
if node != self._threaddata.metacache[0]: | if node != self._threaddata.metacache[0]: | ||||
data = self._getdata(name, node) | data = self._getdata(name, node) | ||||
offset, size, flags = shallowutil.parsesizeflags(data) | offset, size, flags = shallowutil.parsesizeflags(data) | ||||
self._updatemetacache(node, size, flags) | self._updatemetacache(node, size, flags) | ||||
return self._threaddata.metacache[1] | return self._threaddata.metacache[1] | ||||
def add(self, name, node, data): | def add(self, name, node, data): | ||||
raise RuntimeError( | raise RuntimeError( | ||||
b"cannot add content only to remotefilelog " b"contentstore" | b"cannot add content only to remotefilelog contentstore" | ||||
) | ) | ||||
def _sanitizemetacache(self): | def _sanitizemetacache(self): | ||||
metacache = getattr(self._threaddata, 'metacache', None) | metacache = getattr(self._threaddata, 'metacache', None) | ||||
if metacache is None: | if metacache is None: | ||||
self._threaddata.metacache = (None, None) # (node, meta) | self._threaddata.metacache = (None, None) # (node, meta) | ||||
def _updatemetacache(self, node, size, flags): | def _updatemetacache(self, node, size, flags): |
try: | try: | ||||
with self._connect() as conn: | with self._connect() as conn: | ||||
remote = conn.peer | remote = conn.peer | ||||
if remote.capable( | if remote.capable( | ||||
constants.NETWORK_CAP_LEGACY_SSH_GETFILES | constants.NETWORK_CAP_LEGACY_SSH_GETFILES | ||||
): | ): | ||||
if not isinstance(remote, _sshv1peer): | if not isinstance(remote, _sshv1peer): | ||||
raise error.Abort( | raise error.Abort( | ||||
b'remotefilelog requires ssh ' b'servers' | b'remotefilelog requires ssh servers' | ||||
) | ) | ||||
step = self.ui.configint( | step = self.ui.configint( | ||||
b'remotefilelog', b'getfilesstep' | b'remotefilelog', b'getfilesstep' | ||||
) | ) | ||||
getfilestype = self.ui.config( | getfilestype = self.ui.config( | ||||
b'remotefilelog', b'getfilestype' | b'remotefilelog', b'getfilestype' | ||||
) | ) | ||||
if getfilestype == b'threaded': | if getfilestype == b'threaded': |
return store.getnodeinfo(name, node) | return store.getnodeinfo(name, node) | ||||
except KeyError: | except KeyError: | ||||
pass | pass | ||||
raise KeyError((name, hex(node))) | raise KeyError((name, hex(node))) | ||||
def add(self, name, node, data): | def add(self, name, node, data): | ||||
raise RuntimeError( | raise RuntimeError( | ||||
b"cannot add content only to remotefilelog " b"contentstore" | b"cannot add content only to remotefilelog contentstore" | ||||
) | ) | ||||
def getmissing(self, keys): | def getmissing(self, keys): | ||||
missing = keys | missing = keys | ||||
for store in self.stores: | for store in self.stores: | ||||
if missing: | if missing: | ||||
missing = store.getmissing(missing) | missing = store.getmissing(missing) | ||||
return missing | return missing | ||||
ancestors = shallowutil.ancestormap(data) | ancestors = shallowutil.ancestormap(data) | ||||
return ancestors | return ancestors | ||||
def getnodeinfo(self, name, node): | def getnodeinfo(self, name, node): | ||||
return self.getancestors(name, node)[node] | return self.getancestors(name, node)[node] | ||||
def add(self, name, node, parents, linknode): | def add(self, name, node, parents, linknode): | ||||
raise RuntimeError( | raise RuntimeError( | ||||
b"cannot add metadata only to remotefilelog " b"metadatastore" | b"cannot add metadata only to remotefilelog metadatastore" | ||||
) | ) | ||||
class remotemetadatastore(object): | class remotemetadatastore(object): | ||||
def __init__(self, ui, fileservice, shared): | def __init__(self, ui, fileservice, shared): | ||||
self._fileservice = fileservice | self._fileservice = fileservice | ||||
self._shared = shared | self._shared = shared | ||||
continue | continue | ||||
yield x | yield x | ||||
elif shallowutil.isenabled(repo): | elif shallowutil.isenabled(repo): | ||||
# don't allow cloning from a shallow repo to a full repo | # don't allow cloning from a shallow repo to a full repo | ||||
# since it would require fetching every version of every | # since it would require fetching every version of every | ||||
# file in order to create the revlogs. | # file in order to create the revlogs. | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"Cannot clone from a shallow repo " b"to a full repo.") | _(b"Cannot clone from a shallow repo to a full repo.") | ||||
) | ) | ||||
else: | else: | ||||
for x in orig(repo, matcher): | for x in orig(repo, matcher): | ||||
yield x | yield x | ||||
extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles) | extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles) | ||||
# expose remotefilelog capabilities | # expose remotefilelog capabilities |
def getcachepath(ui, allowempty=False): | def getcachepath(ui, allowempty=False): | ||||
cachepath = ui.config(b"remotefilelog", b"cachepath") | cachepath = ui.config(b"remotefilelog", b"cachepath") | ||||
if not cachepath: | if not cachepath: | ||||
if allowempty: | if allowempty: | ||||
return None | return None | ||||
else: | else: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"could not find config option " b"remotefilelog.cachepath") | _(b"could not find config option remotefilelog.cachepath") | ||||
) | ) | ||||
return util.expandpath(cachepath) | return util.expandpath(cachepath) | ||||
def getcachepackpath(repo, category): | def getcachepackpath(repo, category): | ||||
cachepath = getcachepath(repo.ui) | cachepath = getcachepath(repo.ui) | ||||
if category != constants.FILEPACK_CATEGORY: | if category != constants.FILEPACK_CATEGORY: | ||||
return os.path.join(cachepath, repo.name, b'packs', category) | return os.path.join(cachepath, repo.name, b'packs', category) | ||||
return results | return results | ||||
def readexactly(stream, n): | def readexactly(stream, n): | ||||
'''read n bytes from stream.read and abort if less was available''' | '''read n bytes from stream.read and abort if less was available''' | ||||
s = stream.read(n) | s = stream.read(n) | ||||
if len(s) < n: | if len(s) < n: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"stream ended unexpectedly" b" (got %d bytes, expected %d)") | _(b"stream ended unexpectedly (got %d bytes, expected %d)") | ||||
% (len(s), n) | % (len(s), n) | ||||
) | ) | ||||
return s | return s | ||||
def readunpack(stream, fmt): | def readunpack(stream, fmt): | ||||
data = readexactly(stream, struct.calcsize(fmt)) | data = readexactly(stream, struct.calcsize(fmt)) | ||||
return struct.unpack(fmt, data) | return struct.unpack(fmt, data) |
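Occurrences like the ones above are mechanical to locate. A hypothetical pattern in the spirit of the search (an assumption for illustration only; the actual regular expression is not reproduced in this review):

    import re

    # Hypothetical, approximate pattern: two adjacent bytes literals separated
    # only by whitespace -- the signature of implicit concatenation. It can
    # false-positive on literals containing escaped quotes.
    IMPLICIT_CONCAT = re.compile(rb"b'[^']*'\s+b'")

    line = b"raise error.Abort(b'remotefilelog requires ssh ' b'servers')"
    print(bool(IMPLICIT_CONCAT.search(line)))  # True: candidate for collapsing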
b'share', | b'share', | ||||
[ | [ | ||||
(b'U', b'noupdate', None, _(b'do not create a working directory')), | (b'U', b'noupdate', None, _(b'do not create a working directory')), | ||||
(b'B', b'bookmarks', None, _(b'also share bookmarks')), | (b'B', b'bookmarks', None, _(b'also share bookmarks')), | ||||
( | ( | ||||
b'', | b'', | ||||
b'relative', | b'relative', | ||||
None, | None, | ||||
_(b'point to source using a relative path ' b'(EXPERIMENTAL)'), | _(b'point to source using a relative path (EXPERIMENTAL)'), | ||||
), | ), | ||||
], | ], | ||||
_(b'[-U] [-B] SOURCE [DEST]'), | _(b'[-U] [-B] SOURCE [DEST]'), | ||||
helpcategory=command.CATEGORY_REPO_CREATION, | helpcategory=command.CATEGORY_REPO_CREATION, | ||||
norepo=True, | norepo=True, | ||||
) | ) | ||||
def share( | def share( | ||||
ui, source, dest=None, noupdate=False, bookmarks=False, relative=False | ui, source, dest=None, noupdate=False, bookmarks=False, relative=False |
# won't cause conflicts for sure. | # won't cause conflicts for sure. | ||||
torebase = list( | torebase = list( | ||||
repo.revs( | repo.revs( | ||||
b'%ld - (%ld & obsolete())::', descendants, descendants | b'%ld - (%ld & obsolete())::', descendants, descendants | ||||
) | ) | ||||
) | ) | ||||
if not alloworphaned and len(torebase) != len(descendants): | if not alloworphaned and len(torebase) != len(descendants): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'split would leave orphaned changesets ' b'behind') | _(b'split would leave orphaned changesets behind') | ||||
) | ) | ||||
else: | else: | ||||
if not alloworphaned and descendants: | if not alloworphaned and descendants: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot split changeset with children without rebase') | _(b'cannot split changeset with children without rebase') | ||||
) | ) | ||||
torebase = () | torebase = () | ||||
if committed: | if committed: | ||||
header = _( | header = _( | ||||
b'HG: Splitting %s. So far it has been split into:\n' | b'HG: Splitting %s. So far it has been split into:\n' | ||||
) % short(ctx.node()) | ) % short(ctx.node()) | ||||
for c in committed: | for c in committed: | ||||
firstline = c.description().split(b'\n', 1)[0] | firstline = c.description().split(b'\n', 1)[0] | ||||
header += _(b'HG: - %s: %s\n') % (short(c.node()), firstline) | header += _(b'HG: - %s: %s\n') % (short(c.node()), firstline) | ||||
header += _( | header += _( | ||||
b'HG: Write commit message for the next split ' b'changeset.\n' | b'HG: Write commit message for the next split changeset.\n' | ||||
) | ) | ||||
else: | else: | ||||
header = _( | header = _( | ||||
b'HG: Splitting %s. Write commit message for the ' | b'HG: Splitting %s. Write commit message for the ' | ||||
b'first split changeset.\n' | b'first split changeset.\n' | ||||
) % short(ctx.node()) | ) % short(ctx.node()) | ||||
opts.update( | opts.update( | ||||
{ | { |
(self._pathid,), | (self._pathid,), | ||||
) | ) | ||||
for i, row in enumerate(res): | for i, row in enumerate(res): | ||||
rid, rev, node, p1rev, p2rev, linkrev, flags = row | rid, rev, node, p1rev, p2rev, linkrev, flags = row | ||||
if i != rev: | if i != rev: | ||||
raise SQLiteStoreError( | raise SQLiteStoreError( | ||||
_(b'sqlite database has inconsistent ' b'revision numbers') | _(b'sqlite database has inconsistent revision numbers') | ||||
) | ) | ||||
if p1rev == nullrev: | if p1rev == nullrev: | ||||
p1node = nullid | p1node = nullid | ||||
else: | else: | ||||
p1node = self._revtonode[p1rev] | p1node = self._revtonode[p1rev] | ||||
if p2rev == nullrev: | if p2rev == nullrev: | ||||
def censorrevision(self, tr, censornode, tombstone=b''): | def censorrevision(self, tr, censornode, tombstone=b''): | ||||
tombstone = storageutil.packmeta({b'censored': tombstone}, b'') | tombstone = storageutil.packmeta({b'censored': tombstone}, b'') | ||||
# This restriction is cargo culted from revlogs and makes no sense for | # This restriction is cargo culted from revlogs and makes no sense for | ||||
# SQLite, since columns can be resized at will. | # SQLite, since columns can be resized at will. | ||||
if len(tombstone) > len(self.rawdata(censornode)): | if len(tombstone) > len(self.rawdata(censornode)): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'censor tombstone must be no longer than ' b'censored data') | _(b'censor tombstone must be no longer than censored data') | ||||
) | ) | ||||
# We need to replace the censored revision's data with the tombstone. | # We need to replace the censored revision's data with the tombstone. | ||||
# But replacing that data will have implications for delta chains that | # But replacing that data will have implications for delta chains that | ||||
# reference it. | # reference it. | ||||
# | # | ||||
# While "better," more complex strategies are possible, we do something | # While "better," more complex strategies are possible, we do something | ||||
# simple: we find delta chain children of the censored revision and we | # simple: we find delta chain children of the censored revision and we | ||||
def newreporequirements(orig, ui, createopts): | def newreporequirements(orig, ui, createopts): | ||||
if createopts[b'backend'] != b'sqlite': | if createopts[b'backend'] != b'sqlite': | ||||
return orig(ui, createopts) | return orig(ui, createopts) | ||||
# This restriction can be lifted once we have more confidence. | # This restriction can be lifted once we have more confidence. | ||||
if b'sharedrepo' in createopts: | if b'sharedrepo' in createopts: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'shared repositories not supported with SQLite ' b'store') | _(b'shared repositories not supported with SQLite store') | ||||
) | ) | ||||
# This filtering is out of an abundance of caution: we want to ensure | # This filtering is out of an abundance of caution: we want to ensure | ||||
# we honor creation options and we do that by annotating exactly the | # we honor creation options and we do that by annotating exactly the | ||||
# creation options we recognize. | # creation options we recognize. | ||||
known = { | known = { | ||||
b'narrowfiles', | b'narrowfiles', | ||||
b'backend', | b'backend', | ||||
b'shallowfilestore', | b'shallowfilestore', | ||||
} | } | ||||
unsupported = set(createopts) - known | unsupported = set(createopts) - known | ||||
if unsupported: | if unsupported: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'SQLite store does not support repo creation ' b'option: %s') | _(b'SQLite store does not support repo creation option: %s') | ||||
% b', '.join(sorted(unsupported)) | % b', '.join(sorted(unsupported)) | ||||
) | ) | ||||
# Since we're a hybrid store that still relies on revlogs, we fall back | # Since we're a hybrid store that still relies on revlogs, we fall back | ||||
# to using the revlogv1 backend's storage requirements then adding our | # to using the revlogv1 backend's storage requirements then adding our | ||||
# own requirement. | # own requirement. | ||||
createopts[b'backend'] = b'revlogv1' | createopts[b'backend'] = b'revlogv1' | ||||
requirements = orig(ui, createopts) | requirements = orig(ui, createopts) |
b'force', | b'force', | ||||
None, | None, | ||||
_( | _( | ||||
b'force removal of changesets, discard ' | b'force removal of changesets, discard ' | ||||
b'uncommitted changes (no backup)' | b'uncommitted changes (no backup)' | ||||
), | ), | ||||
), | ), | ||||
(b'', b'no-backup', None, _(b'do not save backup bundle')), | (b'', b'no-backup', None, _(b'do not save backup bundle')), | ||||
( | (b'', b'nobackup', None, _(b'do not save backup bundle (DEPRECATED)'),), | ||||
b'', | |||||
b'nobackup', | |||||
None, | |||||
_(b'do not save backup bundle ' b'(DEPRECATED)'), | |||||
), | |||||
(b'n', b'', None, _(b'ignored (DEPRECATED)')), | (b'n', b'', None, _(b'ignored (DEPRECATED)')), | ||||
( | ( | ||||
b'k', | b'k', | ||||
b'keep', | b'keep', | ||||
None, | None, | ||||
_(b"do not modify working directory during " b"strip"), | _(b"do not modify working directory during strip"), | ||||
), | ), | ||||
( | ( | ||||
b'B', | b'B', | ||||
b'bookmark', | b'bookmark', | ||||
[], | [], | ||||
_(b"remove revs only reachable from given" b" bookmark"), | _(b"remove revs only reachable from given bookmark"), | ||||
_(b'BOOKMARK'), | _(b'BOOKMARK'), | ||||
), | ), | ||||
( | ( | ||||
b'', | b'', | ||||
b'soft', | b'soft', | ||||
None, | None, | ||||
_(b"simply drop changesets from visible history (EXPERIMENTAL)"), | _(b"simply drop changesets from visible history (EXPERIMENTAL)"), | ||||
), | ), |
else: | else: | ||||
merge = True | merge = True | ||||
extra = {b'transplant_source': node} | extra = {b'transplant_source': node} | ||||
try: | try: | ||||
p1 = repo.dirstate.p1() | p1 = repo.dirstate.p1() | ||||
if p1 != parent: | if p1 != parent: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'working directory not at transplant ' b'parent %s') | _(b'working directory not at transplant parent %s') | ||||
% nodemod.hex(parent) | % nodemod.hex(parent) | ||||
) | ) | ||||
if merge: | if merge: | ||||
repo.setparents(p1, parents[1]) | repo.setparents(p1, parents[1]) | ||||
modified, added, removed, deleted = repo.status()[:4] | modified, added, removed, deleted = repo.status()[:4] | ||||
if merge or modified or added or removed or deleted: | if merge or modified or added or removed or deleted: | ||||
n = repo.commit( | n = repo.commit( | ||||
message, | message, | ||||
), | ), | ||||
(b'e', b'edit', False, _(b'invoke editor on commit messages')), | (b'e', b'edit', False, _(b'invoke editor on commit messages')), | ||||
(b'', b'log', None, _(b'append transplant info to log message')), | (b'', b'log', None, _(b'append transplant info to log message')), | ||||
(b'', b'stop', False, _(b'stop interrupted transplant')), | (b'', b'stop', False, _(b'stop interrupted transplant')), | ||||
( | ( | ||||
b'c', | b'c', | ||||
b'continue', | b'continue', | ||||
None, | None, | ||||
_(b'continue last transplant session ' b'after fixing conflicts'), | _(b'continue last transplant session after fixing conflicts'), | ||||
), | ), | ||||
( | ( | ||||
b'', | b'', | ||||
b'filter', | b'filter', | ||||
b'', | b'', | ||||
_(b'filter changesets through command'), | _(b'filter changesets through command'), | ||||
_(b'CMD'), | _(b'CMD'), | ||||
), | ), | ||||
b'list provided' | b'list provided' | ||||
) | ) | ||||
) | ) | ||||
if opts.get(b'all'): | if opts.get(b'all'): | ||||
if not opts.get(b'branch'): | if not opts.get(b'branch'): | ||||
raise error.Abort(_(b'--all requires a branch revision')) | raise error.Abort(_(b'--all requires a branch revision')) | ||||
if revs: | if revs: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'--all is incompatible with a ' b'revision list') | _(b'--all is incompatible with a revision list') | ||||
) | ) | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
checkopts(opts, revs) | checkopts(opts, revs) | ||||
if not opts.get(b'log'): | if not opts.get(b'log'): | ||||
# deprecated config: transplant.log | # deprecated config: transplant.log | ||||
opts[b'log'] = ui.config(b'transplant', b'log') | opts[b'log'] = ui.config(b'transplant', b'log') |
m, a, r, d = repo.status()[:4] | m, a, r, d = repo.status()[:4] | ||||
isdirtypath = any(set(m + a + r + d) & set(pats)) | isdirtypath = any(set(m + a + r + d) & set(pats)) | ||||
allowdirtywcopy = opts[ | allowdirtywcopy = opts[ | ||||
b'allow_dirty_working_copy' | b'allow_dirty_working_copy' | ||||
] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir') | ] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir') | ||||
if not allowdirtywcopy and (not pats or isdirtypath): | if not allowdirtywcopy and (not pats or isdirtypath): | ||||
cmdutil.bailifchanged( | cmdutil.bailifchanged( | ||||
repo, | repo, | ||||
hint=_(b'requires ' b'--allow-dirty-working-copy to uncommit'), | hint=_(b'requires --allow-dirty-working-copy to uncommit'), | ||||
) | ) | ||||
old = repo[b'.'] | old = repo[b'.'] | ||||
rewriteutil.precheck(repo, [old.rev()], b'uncommit') | rewriteutil.precheck(repo, [old.rev()], b'uncommit') | ||||
if len(old.parents()) > 1: | if len(old.parents()) > 1: | ||||
raise error.Abort(_(b"cannot uncommit merge changeset")) | raise error.Abort(_(b"cannot uncommit merge changeset")) | ||||
match = scmutil.match(old, pats, opts) | match = scmutil.match(old, pats, opts) | ||||
# Check all explicitly given files; abort if there's a problem. | # Check all explicitly given files; abort if there's a problem. | ||||
if match.files(): | if match.files(): | ||||
s = old.status(old.p1(), match, listclean=True) | s = old.status(old.p1(), match, listclean=True) | ||||
eligible = set(s.added) | set(s.modified) | set(s.removed) | eligible = set(s.added) | set(s.modified) | set(s.removed) | ||||
badfiles = set(match.files()) - eligible | badfiles = set(match.files()) - eligible | ||||
# Naming a parent directory of an eligible file is OK, even | # Naming a parent directory of an eligible file is OK, even | ||||
# if not everything tracked in that directory can be | # if not everything tracked in that directory can be | ||||
# uncommitted. | # uncommitted. | ||||
if badfiles: | if badfiles: | ||||
badfiles -= {f for f in util.dirs(eligible)} | badfiles -= {f for f in util.dirs(eligible)} | ||||
for f in sorted(badfiles): | for f in sorted(badfiles): | ||||
if f in s.clean: | if f in s.clean: | ||||
hint = _( | hint = _( | ||||
b"file was not changed in working directory " b"parent" | b"file was not changed in working directory parent" | ||||
) | ) | ||||
elif repo.wvfs.exists(f): | elif repo.wvfs.exists(f): | ||||
hint = _(b"file was untracked in working directory parent") | hint = _(b"file was untracked in working directory parent") | ||||
else: | else: | ||||
hint = _(b"file does not exist") | hint = _(b"file does not exist") | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f), | _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f), |
return func(*args, **kwds) | return func(*args, **kwds) | ||||
try: | try: | ||||
# convert string arguments, call func, then convert back the | # convert string arguments, call func, then convert back the | ||||
# return value. | # return value. | ||||
return enc(func(*dec(args), **dec(kwds))) | return enc(func(*dec(args), **dec(kwds))) | ||||
except UnicodeError: | except UnicodeError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"[win32mbcs] filename conversion failed with" b" %s encoding\n") | _(b"[win32mbcs] filename conversion failed with %s encoding\n") | ||||
% _encoding | % _encoding | ||||
) | ) | ||||
def wrapper(func, args, kwds): | def wrapper(func, args, kwds): | ||||
return basewrapper(func, pycompat.unicode, encode, decode, args, kwds) | return basewrapper(func, pycompat.unicode, encode, decode, args, kwds) | ||||
delbms = divergent2delete(self._repo, deletefrom, mark) | delbms = divergent2delete(self._repo, deletefrom, mark) | ||||
if validdest(self._repo, bmctx, self._repo[target]): | if validdest(self._repo, bmctx, self._repo[target]): | ||||
self._repo.ui.status( | self._repo.ui.status( | ||||
_(b"moving bookmark '%s' forward from %s\n") | _(b"moving bookmark '%s' forward from %s\n") | ||||
% (mark, short(bmctx.node())) | % (mark, short(bmctx.node())) | ||||
) | ) | ||||
return delbms | return delbms | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"bookmark '%s' already exists " b"(use -f to force)") % mark | _(b"bookmark '%s' already exists (use -f to force)") % mark | ||||
) | ) | ||||
if ( | if ( | ||||
mark in self._repo.branchmap() | mark in self._repo.branchmap() | ||||
or mark == self._repo.dirstate.branch() | or mark == self._repo.dirstate.branch() | ||||
) and not force: | ) and not force: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"a bookmark cannot have the name of an existing branch") | _(b"a bookmark cannot have the name of an existing branch") | ||||
) | ) | ||||
def checkformat(repo, mark): | def checkformat(repo, mark): | ||||
"""return a valid version of a potential bookmark name | """return a valid version of a potential bookmark name | ||||
Raises an abort error if the bookmark name is not valid. | Raises an abort error if the bookmark name is not valid. | ||||
""" | """ | ||||
mark = mark.strip() | mark = mark.strip() | ||||
if not mark: | if not mark: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"bookmark names cannot consist entirely of " b"whitespace") | _(b"bookmark names cannot consist entirely of whitespace") | ||||
) | ) | ||||
scmutil.checknewlabel(repo, mark, b'bookmark') | scmutil.checknewlabel(repo, mark, b'bookmark') | ||||
return mark | return mark | ||||
def delete(repo, tr, names): | def delete(repo, tr, names): | ||||
"""remove a mark from the bookmark store | """remove a mark from the bookmark store | ||||
indebug(self.ui, b'part header size: %i\n' % headersize) | indebug(self.ui, b'part header size: %i\n' % headersize) | ||||
if headersize: | if headersize: | ||||
return self._readexact(headersize) | return self._readexact(headersize) | ||||
return None | return None | ||||
def __call__(self): | def __call__(self): | ||||
self.ui.debug( | self.ui.debug( | ||||
b'bundle2-input-stream-interrupt:' b' opening out of band context\n' | b'bundle2-input-stream-interrupt: opening out of band context\n' | ||||
) | ) | ||||
indebug(self.ui, b'bundle2 stream interruption, looking for a part.') | indebug(self.ui, b'bundle2 stream interruption, looking for a part.') | ||||
headerblock = self._readpartheader() | headerblock = self._readpartheader() | ||||
if headerblock is None: | if headerblock is None: | ||||
indebug(self.ui, b'no part found during interruption.') | indebug(self.ui, b'no part found during interruption.') | ||||
return | return | ||||
part = unbundlepart(self.ui, headerblock, self._fp) | part = unbundlepart(self.ui, headerblock, self._fp) | ||||
op = interruptoperation(self.ui) | op = interruptoperation(self.ui) | ||||
hardabort = False | hardabort = False | ||||
try: | try: | ||||
_processpart(op, part) | _processpart(op, part) | ||||
except (SystemExit, KeyboardInterrupt): | except (SystemExit, KeyboardInterrupt): | ||||
hardabort = True | hardabort = True | ||||
raise | raise | ||||
finally: | finally: | ||||
if not hardabort: | if not hardabort: | ||||
part.consume() | part.consume() | ||||
self.ui.debug( | self.ui.debug( | ||||
b'bundle2-input-stream-interrupt:' b' closing out of band context\n' | b'bundle2-input-stream-interrupt: closing out of band context\n' | ||||
) | ) | ||||
class interruptoperation(object): | class interruptoperation(object): | ||||
"""A limited operation to be use by part handler during interruption | """A limited operation to be use by part handler during interruption | ||||
It only have access to an ui object. | It only have access to an ui object. | ||||
""" | """ | ||||
else: | else: | ||||
raise error.BundleValueError( | raise error.BundleValueError( | ||||
b'negative payload chunk size: %s' % chunksize | b'negative payload chunk size: %s' % chunksize | ||||
) | ) | ||||
s = read(headersize) | s = read(headersize) | ||||
if len(s) < headersize: | if len(s) < headersize: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'stream ended unexpectedly ' b' (got %d bytes, expected %d)') | _(b'stream ended unexpectedly (got %d bytes, expected %d)') | ||||
% (len(s), chunksize) | % (len(s), chunksize) | ||||
) | ) | ||||
chunksize = unpack(s)[0] | chunksize = unpack(s)[0] | ||||
# indebug() inlined for performance. | # indebug() inlined for performance. | ||||
if dolog: | if dolog: | ||||
debug(b'bundle2-input: payload chunk size: %i\n' % chunksize) | debug(b'bundle2-input: payload chunk size: %i\n' % chunksize) | ||||
b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False | b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False | ||||
) | ) | ||||
chunkiter = bundle.getchunks() | chunkiter = bundle.getchunks() | ||||
else: | else: | ||||
# compression argument is only for the bundle2 case | # compression argument is only for the bundle2 case | ||||
assert compression is None | assert compression is None | ||||
if cg.version != b'01': | if cg.version != b'01': | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'old bundle types only supports v1 ' b'changegroups') | _(b'old bundle types only supports v1 changegroups') | ||||
) | ) | ||||
header, comp = bundletypes[bundletype] | header, comp = bundletypes[bundletype] | ||||
if comp not in util.compengines.supportedbundletypes: | if comp not in util.compengines.supportedbundletypes: | ||||
raise error.Abort(_(b'unknown stream compression type: %s') % comp) | raise error.Abort(_(b'unknown stream compression type: %s') % comp) | ||||
compengine = util.compengines.forbundletype(comp) | compengine = util.compengines.forbundletype(comp) | ||||
def chunkiter(): | def chunkiter(): | ||||
yield header | yield header | ||||
heads.append(h) | heads.append(h) | ||||
h = inpart.read(20) | h = inpart.read(20) | ||||
assert not h | assert not h | ||||
# Trigger a transaction so that we are guaranteed to have the lock now. | # Trigger a transaction so that we are guaranteed to have the lock now. | ||||
if op.ui.configbool(b'experimental', b'bundle2lazylocking'): | if op.ui.configbool(b'experimental', b'bundle2lazylocking'): | ||||
op.gettransaction() | op.gettransaction() | ||||
if sorted(heads) != sorted(op.repo.heads()): | if sorted(heads) != sorted(op.repo.heads()): | ||||
raise error.PushRaced( | raise error.PushRaced( | ||||
b'remote repository changed while pushing - ' b'please try again' | b'remote repository changed while pushing - please try again' | ||||
) | ) | ||||
@parthandler(b'check:updated-heads') | @parthandler(b'check:updated-heads') | ||||
def handlecheckupdatedheads(op, inpart): | def handlecheckupdatedheads(op, inpart): | ||||
"""check for race on the heads touched by a push | """check for race on the heads touched by a push | ||||
This is similar to 'check:heads' but focuses on the heads actually updated | This is similar to 'check:heads' but focuses on the heads actually updated |
self._bundlefile = bundle | self._bundlefile = bundle | ||||
self._cgunpacker = None | self._cgunpacker = None | ||||
cgpart = None | cgpart = None | ||||
for part in bundle.iterparts(seekable=True): | for part in bundle.iterparts(seekable=True): | ||||
if part.type == b'changegroup': | if part.type == b'changegroup': | ||||
if cgpart: | if cgpart: | ||||
raise NotImplementedError( | raise NotImplementedError( | ||||
b"can't process " b"multiple changegroups" | b"can't process multiple changegroups" | ||||
) | ) | ||||
cgpart = part | cgpart = part | ||||
self._handlebundle2part(bundle, part) | self._handlebundle2part(bundle, part) | ||||
if not cgpart: | if not cgpart: | ||||
raise error.Abort(_(b"No changegroups found")) | raise error.Abort(_(b"No changegroups found")) | ||||
if matcher is None: | if matcher is None: | ||||
matcher = matchmod.always() | matcher = matchmod.always() | ||||
if oldmatcher is None: | if oldmatcher is None: | ||||
oldmatcher = matchmod.never() | oldmatcher = matchmod.never() | ||||
if version == b'01' and not matcher.always(): | if version == b'01' and not matcher.always(): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'version 01 changegroups do not support ' b'sparse file matchers' | b'version 01 changegroups do not support sparse file matchers' | ||||
) | ) | ||||
if ellipses and version in (b'01', b'02'): | if ellipses and version in (b'01', b'02'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'ellipsis nodes require at least cg3 on client and server, ' | b'ellipsis nodes require at least cg3 on client and server, ' | ||||
b'but negotiated version %s' | b'but negotiated version %s' | ||||
) | ) |
def resolvecommitoptions(ui, opts): | def resolvecommitoptions(ui, opts): | ||||
"""modify commit options dict to handle related options | """modify commit options dict to handle related options | ||||
The return value indicates that ``rewrite.update-timestamp`` is the reason | The return value indicates that ``rewrite.update-timestamp`` is the reason | ||||
the ``date`` option is set. | the ``date`` option is set. | ||||
""" | """ | ||||
if opts.get(b'date') and opts.get(b'currentdate'): | if opts.get(b'date') and opts.get(b'currentdate'): | ||||
raise error.Abort( | raise error.Abort(_(b'--date and --currentdate are mutually exclusive')) | ||||
_(b'--date and --currentdate are mutually ' b'exclusive') | |||||
) | |||||
if opts.get(b'user') and opts.get(b'currentuser'): | if opts.get(b'user') and opts.get(b'currentuser'): | ||||
raise error.Abort( | raise error.Abort(_(b'--user and --currentuser are mutually exclusive')) | ||||
_(b'--user and --currentuser are mutually ' b'exclusive') | |||||
) | |||||
datemaydiffer = False # date-only change should be ignored? | datemaydiffer = False # date-only change should be ignored? | ||||
if opts.get(b'currentdate'): | if opts.get(b'currentdate'): | ||||
opts[b'date'] = b'%d %d' % dateutil.makedate() | opts[b'date'] = b'%d %d' % dateutil.makedate() | ||||
elif ( | elif ( | ||||
not opts.get(b'date') | not opts.get(b'date') | ||||
and ui.configbool(b'rewrite', b'update-timestamp') | and ui.configbool(b'rewrite', b'update-timestamp') | ||||
def logmessage(ui, opts): | def logmessage(ui, opts): | ||||
""" get the log message according to -m and -l option """ | """ get the log message according to -m and -l option """ | ||||
message = opts.get(b'message') | message = opts.get(b'message') | ||||
logfile = opts.get(b'logfile') | logfile = opts.get(b'logfile') | ||||
if message and logfile: | if message and logfile: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'options --message and --logfile are mutually ' b'exclusive') | _(b'options --message and --logfile are mutually exclusive') | ||||
) | ) | ||||
if not message and logfile: | if not message and logfile: | ||||
try: | try: | ||||
if isstdiofilename(logfile): | if isstdiofilename(logfile): | ||||
message = ui.fin.read() | message = ui.fin.read() | ||||
else: | else: | ||||
message = b'\n'.join(util.readfile(logfile).splitlines()) | message = b'\n'.join(util.readfile(logfile).splitlines()) | ||||
except IOError as inst: | except IOError as inst: | ||||
while i < end: | while i < end: | ||||
n = pat.find(b'%', i, end) | n = pat.find(b'%', i, end) | ||||
if n < 0: | if n < 0: | ||||
newname.append(stringutil.escapestr(pat[i:end])) | newname.append(stringutil.escapestr(pat[i:end])) | ||||
break | break | ||||
newname.append(stringutil.escapestr(pat[i:n])) | newname.append(stringutil.escapestr(pat[i:n])) | ||||
if n + 2 > end: | if n + 2 > end: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"incomplete format spec in output " b"filename") | _(b"incomplete format spec in output filename") | ||||
) | ) | ||||
c = pat[n + 1 : n + 2] | c = pat[n + 1 : n + 2] | ||||
i = n + 2 | i = n + 2 | ||||
try: | try: | ||||
newname.append(expander[c]) | newname.append(expander[c]) | ||||
except KeyError: | except KeyError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"invalid format spec '%%%s' in output " b"filename") % c | _(b"invalid format spec '%%%s' in output filename") % c | ||||
) | ) | ||||
return b''.join(newname) | return b''.join(newname) | ||||
def makefilename(ctx, pat, **props): | def makefilename(ctx, pat, **props): | ||||
if not pat: | if not pat: | ||||
return pat | return pat | ||||
tmpl = _buildfntemplate(pat, **props) | tmpl = _buildfntemplate(pat, **props) | ||||
return [] | return [] | ||||
if slowpath: | if slowpath: | ||||
# We have to read the changelog to match filenames against | # We have to read the changelog to match filenames against | ||||
# changed files | # changed files | ||||
if follow: | if follow: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'can only follow copies/renames for explicit ' b'filenames') | _(b'can only follow copies/renames for explicit filenames') | ||||
) | ) | ||||
# The slow path checks files modified in every changeset. | # The slow path checks files modified in every changeset. | ||||
# This is really slow on large repos, so compute the set lazily. | # This is really slow on large repos, so compute the set lazily. | ||||
class lazywantedset(object): | class lazywantedset(object): | ||||
def __init__(self): | def __init__(self): | ||||
self.set = set() | self.set = set() | ||||
self.revs = set(revs) | self.revs = set(revs) |
force=True, | force=True, | ||||
ancestor=node, | ancestor=node, | ||||
mergeancestor=False, | mergeancestor=False, | ||||
) | ) | ||||
repo.setparents(op1, op2) | repo.setparents(op1, op2) | ||||
hg._showstats(repo, stats) | hg._showstats(repo, stats) | ||||
if stats.unresolvedcount: | if stats.unresolvedcount: | ||||
repo.ui.status( | repo.ui.status( | ||||
_(b"use 'hg resolve' to retry unresolved " b"file merges\n") | _(b"use 'hg resolve' to retry unresolved file merges\n") | ||||
) | ) | ||||
return 1 | return 1 | ||||
else: | else: | ||||
hg.clean(repo, node, show_stats=False) | hg.clean(repo, node, show_stats=False) | ||||
repo.dirstate.setbranch(branch) | repo.dirstate.setbranch(branch) | ||||
cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) | cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) | ||||
if opts.get(b'no_commit'): | if opts.get(b'no_commit'): | ||||
msg = _(b"changeset %s backed out, " b"don't forget to commit.\n") | msg = _(b"changeset %s backed out, don't forget to commit.\n") | ||||
ui.status(msg % short(node)) | ui.status(msg % short(node)) | ||||
return 0 | return 0 | ||||
def commitfunc(ui, repo, message, match, opts): | def commitfunc(ui, repo, message, match, opts): | ||||
editform = b'backout' | editform = b'backout' | ||||
e = cmdutil.getcommiteditor( | e = cmdutil.getcommiteditor( | ||||
editform=editform, **pycompat.strkwargs(opts) | editform=editform, **pycompat.strkwargs(opts) | ||||
) | ) | ||||
scmutil.checknewlabel(repo, label, b'branch') | scmutil.checknewlabel(repo, label, b'branch') | ||||
if revs: | if revs: | ||||
return cmdutil.changebranch(ui, repo, revs, label) | return cmdutil.changebranch(ui, repo, revs, label) | ||||
if not opts.get(b'force') and label in repo.branchmap(): | if not opts.get(b'force') and label in repo.branchmap(): | ||||
if label not in [p.branch() for p in repo[None].parents()]: | if label not in [p.branch() for p in repo[None].parents()]: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'a branch of the same name already' b' exists'), | _(b'a branch of the same name already exists'), | ||||
# i18n: "it" refers to an existing branch | # i18n: "it" refers to an existing branch | ||||
hint=_(b"use 'hg update' to switch to it"), | hint=_(b"use 'hg update' to switch to it"), | ||||
) | ) | ||||
repo.dirstate.setbranch(label) | repo.dirstate.setbranch(label) | ||||
ui.status(_(b'marked working directory as branch %s\n') % label) | ui.status(_(b'marked working directory as branch %s\n') % label) | ||||
# find any open named branches aside from default | # find any open named branches aside from default | ||||
raise error.Abort(_(b'no commits to bundle')) | raise error.Abort(_(b'no commits to bundle')) | ||||
bundletype = opts.get(b'type', b'bzip2').lower() | bundletype = opts.get(b'type', b'bzip2').lower() | ||||
try: | try: | ||||
bundlespec = exchange.parsebundlespec(repo, bundletype, strict=False) | bundlespec = exchange.parsebundlespec(repo, bundletype, strict=False) | ||||
except error.UnsupportedBundleSpecification as e: | except error.UnsupportedBundleSpecification as e: | ||||
raise error.Abort( | raise error.Abort( | ||||
pycompat.bytestr(e), | pycompat.bytestr(e), | ||||
hint=_( | hint=_(b"see 'hg help bundlespec' for supported values for --type"), | ||||
b"see 'hg help bundlespec' for supported " b"values for --type" | |||||
), | |||||
) | ) | ||||
cgversion = bundlespec.contentopts[b"cg.version"] | cgversion = bundlespec.contentopts[b"cg.version"] | ||||
# Packed bundles are a pseudo bundle format for now. | # Packed bundles are a pseudo bundle format for now. | ||||
if cgversion == b's1': | if cgversion == b's1': | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'packed bundles cannot be produced by "hg bundle"'), | _(b'packed bundles cannot be produced by "hg bundle"'), | ||||
hint=_(b"use 'hg debugcreatestreamclonebundle'"), | hint=_(b"use 'hg debugcreatestreamclonebundle'"), | ||||
) | ) | ||||
if opts.get(b'all'): | if opts.get(b'all'): | ||||
if dest: | if dest: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"--all is incompatible with specifying " b"a destination") | _(b"--all is incompatible with specifying a destination") | ||||
) | ) | ||||
if opts.get(b'base'): | if opts.get(b'base'): | ||||
ui.warn(_(b"ignoring --base because --all was specified\n")) | ui.warn(_(b"ignoring --base because --all was specified\n")) | ||||
base = [nullrev] | base = [nullrev] | ||||
else: | else: | ||||
base = scmutil.revrange(repo, opts.get(b'base')) | base = scmutil.revrange(repo, opts.get(b'base')) | ||||
if cgversion not in changegroup.supportedoutgoingversions(repo): | if cgversion not in changegroup.supportedoutgoingversions(repo): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"repository does not support bundle version %s") % cgversion | _(b"repository does not support bundle version %s") % cgversion | ||||
) | ) | ||||
if base: | if base: | ||||
if dest: | if dest: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"--base is incompatible with specifying " b"a destination") | _(b"--base is incompatible with specifying a destination") | ||||
) | ) | ||||
common = [repo[rev].node() for rev in base] | common = [repo[rev].node() for rev in base] | ||||
heads = [repo[r].node() for r in revs] if revs else None | heads = [repo[r].node() for r in revs] if revs else None | ||||
outgoing = discovery.outgoing(repo, common, heads) | outgoing = discovery.outgoing(repo, common, heads) | ||||
else: | else: | ||||
dest = ui.expandpath(dest or b'default-push', dest or b'default') | dest = ui.expandpath(dest or b'default-push', dest or b'default') | ||||
dest, branches = hg.parseurl(dest, opts.get(b'branch')) | dest, branches = hg.parseurl(dest, opts.get(b'branch')) | ||||
other = hg.peer(repo, opts, dest) | other = hg.peer(repo, opts, dest) | ||||
bheads = repo.branchheads(branch) | bheads = repo.branchheads(branch) | ||||
extra = {} | extra = {} | ||||
if opts.get(b'close_branch') or opts.get(b'force_close_branch'): | if opts.get(b'close_branch') or opts.get(b'force_close_branch'): | ||||
extra[b'close'] = b'1' | extra[b'close'] = b'1' | ||||
if repo[b'.'].closesbranch(): | if repo[b'.'].closesbranch(): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'current revision is already a branch closing' b' head') | _(b'current revision is already a branch closing head') | ||||
) | ) | ||||
elif not bheads: | elif not bheads: | ||||
raise error.Abort(_(b'branch "%s" has no heads to close') % branch) | raise error.Abort(_(b'branch "%s" has no heads to close') % branch) | ||||
elif ( | elif ( | ||||
branch == repo[b'.'].branch() | branch == repo[b'.'].branch() | ||||
and repo[b'.'].node() not in bheads | and repo[b'.'].node() not in bheads | ||||
and not opts.get(b'force_close_branch') | and not opts.get(b'force_close_branch') | ||||
): | ): | ||||
""" | """ | ||||
dryrun = opts.get(r'dry_run') | dryrun = opts.get(r'dry_run') | ||||
contstate = cmdutil.getunfinishedstate(repo) | contstate = cmdutil.getunfinishedstate(repo) | ||||
if not contstate: | if not contstate: | ||||
raise error.Abort(_(b'no operation in progress')) | raise error.Abort(_(b'no operation in progress')) | ||||
if not contstate.continuefunc: | if not contstate.continuefunc: | ||||
raise error.Abort( | raise error.Abort( | ||||
( | ( | ||||
_(b"%s in progress but does not support " b"'hg continue'") | _(b"%s in progress but does not support 'hg continue'") | ||||
% (contstate._opname) | % (contstate._opname) | ||||
), | ), | ||||
hint=contstate.continuemsg(), | hint=contstate.continuemsg(), | ||||
) | ) | ||||
if dryrun: | if dryrun: | ||||
ui.status(_(b'%s in progress, will be resumed\n') % (contstate._opname)) | ui.status(_(b'%s in progress, will be resumed\n') % (contstate._opname)) | ||||
return | return | ||||
return contstate.continuefunc(ui, repo) | return contstate.continuefunc(ui, repo) | ||||
editor = cmdutil.getcommiteditor( | editor = cmdutil.getcommiteditor( | ||||
editform=b'graft', **pycompat.strkwargs(opts) | editform=b'graft', **pycompat.strkwargs(opts) | ||||
) | ) | ||||
cont = False | cont = False | ||||
if opts.get(b'no_commit'): | if opts.get(b'no_commit'): | ||||
if opts.get(b'edit'): | if opts.get(b'edit'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot specify --no-commit and " b"--edit together") | _(b"cannot specify --no-commit and --edit together") | ||||
) | ) | ||||
if opts.get(b'currentuser'): | if opts.get(b'currentuser'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot specify --no-commit and " b"--currentuser together") | _(b"cannot specify --no-commit and --currentuser together") | ||||
) | ) | ||||
if opts.get(b'currentdate'): | if opts.get(b'currentdate'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot specify --no-commit and " b"--currentdate together") | _(b"cannot specify --no-commit and --currentdate together") | ||||
) | ) | ||||
if opts.get(b'log'): | if opts.get(b'log'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot specify --no-commit and " b"--log together") | _(b"cannot specify --no-commit and --log together") | ||||
) | ) | ||||
graftstate = statemod.cmdstate(repo, b'graftstate') | graftstate = statemod.cmdstate(repo, b'graftstate') | ||||
if opts.get(b'stop'): | if opts.get(b'stop'): | ||||
if opts.get(b'continue'): | if opts.get(b'continue'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot use '--continue' and " b"'--stop' together") | _(b"cannot use '--continue' and '--stop' together") | ||||
) | ) | ||||
if opts.get(b'abort'): | if opts.get(b'abort'): | ||||
raise error.Abort(_(b"cannot use '--abort' and '--stop' together")) | raise error.Abort(_(b"cannot use '--abort' and '--stop' together")) | ||||
if any( | if any( | ||||
( | ( | ||||
opts.get(b'edit'), | opts.get(b'edit'), | ||||
opts.get(b'log'), | opts.get(b'log'), | ||||
opts.get(b'user'), | opts.get(b'user'), | ||||
opts.get(b'date'), | opts.get(b'date'), | ||||
opts.get(b'currentdate'), | opts.get(b'currentdate'), | ||||
opts.get(b'currentuser'), | opts.get(b'currentuser'), | ||||
opts.get(b'rev'), | opts.get(b'rev'), | ||||
) | ) | ||||
): | ): | ||||
raise error.Abort(_(b"cannot specify any other flag with '--stop'")) | raise error.Abort(_(b"cannot specify any other flag with '--stop'")) | ||||
return _stopgraft(ui, repo, graftstate) | return _stopgraft(ui, repo, graftstate) | ||||
elif opts.get(b'abort'): | elif opts.get(b'abort'): | ||||
if opts.get(b'continue'): | if opts.get(b'continue'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"cannot use '--continue' and " b"'--abort' together") | _(b"cannot use '--continue' and '--abort' together") | ||||
) | ) | ||||
if any( | if any( | ||||
( | ( | ||||
opts.get(b'edit'), | opts.get(b'edit'), | ||||
opts.get(b'log'), | opts.get(b'log'), | ||||
opts.get(b'user'), | opts.get(b'user'), | ||||
opts.get(b'date'), | opts.get(b'date'), | ||||
opts.get(b'currentdate'), | opts.get(b'currentdate'), | ||||
including full hash identifiers. | including full hash identifiers. | ||||
Returns 0 if successful. | Returns 0 if successful. | ||||
""" | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
if not repo and not source: | if not repo and not source: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"there is no Mercurial repository here " b"(.hg not found)") | _(b"there is no Mercurial repository here (.hg not found)") | ||||
) | ) | ||||
default = not (num or id or branch or tags or bookmarks) | default = not (num or id or branch or tags or bookmarks) | ||||
output = [] | output = [] | ||||
revs = [] | revs = [] | ||||
if source: | if source: | ||||
source, branches = hg.parseurl(ui.expandpath(source)) | source, branches = hg.parseurl(ui.expandpath(source)) | ||||
statemod.addunfinished( | statemod.addunfinished( | ||||
b'merge', | b'merge', | ||||
fname=None, | fname=None, | ||||
clearable=True, | clearable=True, | ||||
allowcommit=True, | allowcommit=True, | ||||
cmdmsg=_(b'outstanding uncommitted merge'), | cmdmsg=_(b'outstanding uncommitted merge'), | ||||
abortfunc=hg.abortmerge, | abortfunc=hg.abortmerge, | ||||
statushint=_( | statushint=_( | ||||
b'To continue: hg commit\n' b'To abort: hg merge --abort' | b'To continue: hg commit\nTo abort: hg merge --abort' | ||||
), | ), | ||||
cmdhint=_(b"use 'hg commit' or 'hg merge --abort'"), | cmdhint=_(b"use 'hg commit' or 'hg merge --abort'"), | ||||
) | ) | ||||
@command( | @command( | ||||
b'outgoing|out', | b'outgoing|out', | ||||
[ | [ | ||||
if modheads is not None and modheads > 1: | if modheads is not None and modheads > 1: | ||||
currentbranchheads = len(repo.branchheads()) | currentbranchheads = len(repo.branchheads()) | ||||
if currentbranchheads == modheads: | if currentbranchheads == modheads: | ||||
ui.status( | ui.status( | ||||
_(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n") | _(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n") | ||||
) | ) | ||||
elif currentbranchheads > 1:
ui.status(
- _(b"(run 'hg heads .' to see heads, 'hg merge' to " b"merge)\n")
+ _(b"(run 'hg heads .' to see heads, 'hg merge' to merge)\n")
)
else:
ui.status(_(b"(run 'hg heads' to see heads)\n"))
elif not ui.configbool(b'commands', b'update.requiredest'):
ui.status(_(b"(run 'hg update' to get a working copy)\n"))
@command(
elif path.pushrev:
# It doesn't make any sense to specify ancestor revisions. So limit
# to DAG heads to make discovery simpler.
expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
revs = scmutil.revrange(repo, [expr])
revs = [repo[rev].node() for rev in revs]
if not revs:
raise error.Abort(
- _(b'default push revset for path evaluates to an ' b'empty set')
+ _(b'default push revset for path evaluates to an empty set')
)
repo._subtoppath = dest
try:
# push subrepos depth-first for coherent ordering
c = repo[b'.']
subs = c.substate # only repos that are committed
for s in sorted(subs):
raise error.Abort(
_(b'no files or directories specified'),
hint=b'use --all to re-merge all unresolved files',
)
if confirm:
if all:
if ui.promptchoice(
- _(b're-merge all unresolved files (yn)?' b'$$ &Yes $$ &No')
+ _(b're-merge all unresolved files (yn)?$$ &Yes $$ &No')
):
raise error.Abort(_(b'user quit'))
if mark and not pats:
if ui.promptchoice(
_(
b'mark all unresolved files as resolved (yn)?'
b'$$ &Yes $$ &No'
)
if opts[b"stdio"] and opts[b"cmdserver"]:
raise error.Abort(_(b"cannot use --stdio with --cmdserver"))
if opts[b"print_url"] and ui.verbose:
raise error.Abort(_(b"cannot use --print-url with --verbose"))
if opts[b"stdio"]:
if repo is None:
raise error.RepoError(
- _(b"there is no Mercurial repository here" b" (.hg not found)")
+ _(b"there is no Mercurial repository here (.hg not found)")
)
s = wireprotoserver.sshserver(ui, repo)
s.serve_forever()
service = server.createservice(ui, repo, opts)
return server.runservice(opts, initfn=service.init, runfn=service.run)
rev_ = b"."
names = [t.strip() for t in (name1,) + names]
if len(names) != len(set(names)):
raise error.Abort(_(b'tag names must be unique'))
for n in names:
scmutil.checknewlabel(repo, n, b'tag')
if not n:
raise error.Abort(
- _(b'tag names cannot consist entirely of ' b'whitespace')
+ _(b'tag names cannot consist entirely of whitespace')
)
if opts.get(b'rev') and opts.get(b'remove'):
raise error.Abort(_(b"--rev and --remove are incompatible"))
if opts.get(b'rev'):
rev_ = opts[b'rev']
message = opts.get(b'message')
if opts.get(b'remove'):
if opts.get(b'local'):
rev_ = b'null'
if not message:
# we don't translate commit messages
message = b'Removed tag %s' % b', '.join(names)
elif not opts.get(b'force'):
for n in names:
if n in repo.tags():
raise error.Abort(
- _(b"tag '%s' already exists " b"(use -f to force)") % n
+ _(b"tag '%s' already exists (use -f to force)") % n
)
if not opts.get(b'local'):
p1, p2 = repo.dirstate.parents()
if p2 != nullid:
raise error.Abort(_(b'uncommitted merge'))
bheads = repo.branchheads()
if not opts.get(b'force') and bheads and p1 not in bheads:
raise error.Abort(
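Throughout these hunks the only change is joining bytes literals that were split mid-line. This is safe because CPython's parser concatenates adjacent string literals at compile time, so the merged form produces an identical constant. A minimal standalone sketch of the equivalence (the `_` below is a stand-in for the i18n helper used in the diff, not the real one):

    # Adjacent bytes literals are concatenated by the parser, so both
    # expressions compile to the same bytes constant.
    split = b'tag names cannot consist entirely of ' b'whitespace'
    merged = b'tag names cannot consist entirely of whitespace'
    assert split == merged

    def _(msg):
        # stand-in for mercurial.i18n._; the real helper translates msg
        return msg

    # Merging literals inside a call changes only the source text, not
    # the value the callee receives.
    assert _(b'uncommitted ' b'merge') == _(b'uncommitted merge')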
if err.errno != errno.ENOENT:
raise
self._repo.ui.warn(
_(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
)
return
if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
self._repo.ui.warn(
- _(b"copy failed: %s is not a file or a " b"symbolic link\n")
+ _(b"copy failed: %s is not a file or a symbolic link\n")
% self._repo.dirstate.pathto(dest)
)
else:
with self._repo.wlock():
ds = self._repo.dirstate
if ds[dest] in b'?':
ds.add(dest)
elif ds[dest] in b'r':
d = self[f].data()
if (
d == b''
or len(d) >= 1024
or b'\n' in d
or stringutil.binary(d)
):
self._repo.ui.debug(
- b'ignoring suspect symlink placeholder' b' "%s"\n' % f
+ b'ignoring suspect symlink placeholder "%s"\n' % f
)
continue
sane.append(f)
return sane
def _checklookup(self, files):
# check for any possibly clean files
if not files:
for ps in poststatus:
ps(self, status)
else:
# in this case, writing changes out breaks
# consistency, because .hg/dirstate was
# already changed simultaneously after last
# caching (see also issue5584 for detail)
self._repo.ui.debug(
- b'skip updating dirstate: ' b'identity mismatch\n'
+ b'skip updating dirstate: identity mismatch\n'
)
except error.LockError:
pass
finally:
# Even if the wlock couldn't be grabbed, clear out the list.
self._repo.clearpostdsstatus()
def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
if f not in fullcopy:
for d in dirmove:
if f.startswith(d):
# new file added in a directory that was moved, move it
df = dirmove[d] + f[len(d) :]
if df not in copy:
movewithdir[f] = df
repo.ui.debug(
- (b" pending file src: '%s' -> " b"dst: '%s'\n")
+ b" pending file src: '%s' -> dst: '%s'\n"
% (f, df)
)
break
return copy, movewithdir, diverge, renamedelete, dirmove
def _heuristicscopytracing(repo, c1, c2, base):
else:
s = b''
i = 0
while c != b'\0' and i < 10:
s += c
i += 1
c = nextch()
raise error.Abort(
- _(b'invalid character in dag description: ' b'%s...') % s
+ _(b'invalid character in dag description: %s...') % s
)
def dagtextlines(
events,
addspaces=True,
wraplabels=False,
wrapannotations=False,
if wrapannotations:
yield b'\n'
yield b'@' + wrapstring(data)
elif kind == b'#':
yield b'#' + data
yield b'\n'
else:
raise error.Abort(
- _(b"invalid event type in dag: " b"('%s', '%s')")
+ _(b"invalid event type in dag: ('%s', '%s')")
% (
stringutil.escapestr(kind),
stringutil.escapestr(data),
)
)
if run:
yield b'+%d' % run
"""find the ancestor revision of two revisions in a given index""" | """find the ancestor revision of two revisions in a given index""" | ||||
if len(args) == 3: | if len(args) == 3: | ||||
index, rev1, rev2 = args | index, rev1, rev2 = args | ||||
r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index) | r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index) | ||||
lookup = r.lookup | lookup = r.lookup | ||||
elif len(args) == 2: | elif len(args) == 2: | ||||
if not repo: | if not repo: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'there is no Mercurial repository here ' b'(.hg not found)') | _(b'there is no Mercurial repository here (.hg not found)') | ||||
) | ) | ||||
rev1, rev2 = args | rev1, rev2 = args | ||||
r = repo.changelog | r = repo.changelog | ||||
lookup = repo.lookup | lookup = repo.lookup | ||||
else: | else: | ||||
raise error.Abort(_(b'either two or three arguments required')) | raise error.Abort(_(b'either two or three arguments required')) | ||||
a = r.ancestor(lookup(rev1), lookup(rev2)) | a = r.ancestor(lookup(rev1), lookup(rev2)) | ||||
ui.write(b'%d:%s\n' % (r.rev(a), hex(a))) | ui.write(b'%d:%s\n' % (r.rev(a), hex(a))) | ||||
try: | try: | ||||
codecs.lookup(pycompat.sysstr(encoding.encoding)) | codecs.lookup(pycompat.sysstr(encoding.encoding)) | ||||
except LookupError as inst: | except LookupError as inst: | ||||
err = stringutil.forcebytestr(inst) | err = stringutil.forcebytestr(inst) | ||||
problems += 1 | problems += 1 | ||||
fm.condwrite( | fm.condwrite( | ||||
err, | err, | ||||
b'encodingerror', | b'encodingerror', | ||||
_(b" %s\n" b" (check that your locale is properly set)\n"), | _(b" %s\n (check that your locale is properly set)\n"), | ||||
err, | err, | ||||
) | ) | ||||
# Python | # Python | ||||
fm.write( | fm.write( | ||||
b'pythonexe', | b'pythonexe', | ||||
_(b"checking Python executable (%s)\n"), | _(b"checking Python executable (%s)\n"), | ||||
pycompat.sysexecutable or _(b"unknown"), | pycompat.sysexecutable or _(b"unknown"), | ||||
sorted(e.name() for e in compengines), | sorted(e.name() for e in compengines), | ||||
name=b'compengine', | name=b'compengine', | ||||
fmt=b'%s', | fmt=b'%s', | ||||
sep=b', ', | sep=b', ', | ||||
), | ), | ||||
) | ) | ||||
fm.write( | fm.write( | ||||
b'compenginesavail', | b'compenginesavail', | ||||
_(b'checking available compression engines ' b'(%s)\n'), | _(b'checking available compression engines (%s)\n'), | ||||
fm.formatlist( | fm.formatlist( | ||||
sorted(e.name() for e in compengines if e.available()), | sorted(e.name() for e in compengines if e.available()), | ||||
name=b'compengine', | name=b'compengine', | ||||
fmt=b'%s', | fmt=b'%s', | ||||
sep=b', ', | sep=b', ', | ||||
), | ), | ||||
) | ) | ||||
wirecompengines = compression.compengines.supportedwireengines( | wirecompengines = compression.compengines.supportedwireengines( | ||||
problems += handler(ui, fm) | problems += handler(ui, fm) | ||||
fm.condwrite(not problems, b'', _(b"no problems detected\n")) | fm.condwrite(not problems, b'', _(b"no problems detected\n")) | ||||
if not problems: | if not problems: | ||||
fm.data(problems=problems) | fm.data(problems=problems) | ||||
fm.condwrite( | fm.condwrite( | ||||
problems, | problems, | ||||
b'problems', | b'problems', | ||||
_(b"%d problems detected," b" please check your install!\n"), | _(b"%d problems detected, please check your install!\n"), | ||||
problems, | problems, | ||||
) | ) | ||||
fm.end() | fm.end() | ||||
return problems | return problems | ||||
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True) | @command(b'debugknown', [], _(b'REPO ID...'), norepo=True) | ||||
False, | False, | ||||
_(b'record parent information for the precursor'), | _(b'record parent information for the precursor'), | ||||
), | ), | ||||
(b'r', b'rev', [], _(b'display markers relevant to REV')), | (b'r', b'rev', [], _(b'display markers relevant to REV')), | ||||
( | ( | ||||
b'', | b'', | ||||
b'exclusive', | b'exclusive', | ||||
False, | False, | ||||
_(b'restrict display to markers only ' b'relevant to REV'), | _(b'restrict display to markers only relevant to REV'), | ||||
), | ), | ||||
(b'', b'index', False, _(b'display index of the marker')), | (b'', b'index', False, _(b'display index of the marker')), | ||||
(b'', b'delete', [], _(b'delete markers specified by indices')), | (b'', b'delete', [], _(b'delete markers specified by indices')), | ||||
] | ] | ||||
+ cmdutil.commitopts2 | + cmdutil.commitopts2 | ||||
+ cmdutil.formatteropts, | + cmdutil.formatteropts, | ||||
_(b'[OBSOLETED [REPLACEMENT ...]]'), | _(b'[OBSOLETED [REPLACEMENT ...]]'), | ||||
) | ) | ||||
except ValueError: | except ValueError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'invalid index value: %r') % v, | _(b'invalid index value: %r') % v, | ||||
hint=_(b'use integers for indices'), | hint=_(b'use integers for indices'), | ||||
) | ) | ||||
if repo.currenttransaction(): | if repo.currenttransaction(): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot delete obsmarkers in the middle ' b'of transaction.') | _(b'cannot delete obsmarkers in the middle of transaction.') | ||||
) | ) | ||||
with repo.lock(): | with repo.lock(): | ||||
n = repair.deleteobsmarkers(repo.obsstore, indices) | n = repair.deleteobsmarkers(repo.obsstore, indices) | ||||
ui.write(_(b'deleted %i obsolescence markers\n') % n) | ui.write(_(b'deleted %i obsolescence markers\n') % n) | ||||
return | return | ||||
idlen = 12 | idlen = 12 | ||||
for i in r: | for i in r: | ||||
idlen = len(shortfn(r.node(i))) | idlen = len(shortfn(r.node(i))) | ||||
break | break | ||||
if format == 0: | if format == 0: | ||||
if ui.verbose: | if ui.verbose: | ||||
ui.writenoi18n( | ui.writenoi18n( | ||||
(b" rev offset length linkrev" b" %s %s p2\n") | b" rev offset length linkrev %s %s p2\n" | ||||
% (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) | % (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) | ||||
) | ) | ||||
else: | else: | ||||
ui.writenoi18n( | ui.writenoi18n( | ||||
b" rev linkrev %s %s p2\n" | b" rev linkrev %s %s p2\n" | ||||
% (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) | % (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) | ||||
) | ) | ||||
elif format == 1: | elif format == 1: | ||||
(b'concatenated', revsetlang.foldconcat), | (b'concatenated', revsetlang.foldconcat), | ||||
(b'analyzed', revsetlang.analyze), | (b'analyzed', revsetlang.analyze), | ||||
(b'optimized', revsetlang.optimize), | (b'optimized', revsetlang.optimize), | ||||
] | ] | ||||
if opts[b'no_optimized']: | if opts[b'no_optimized']: | ||||
stages = stages[:-1] | stages = stages[:-1] | ||||
if opts[b'verify_optimized'] and opts[b'no_optimized']: | if opts[b'verify_optimized'] and opts[b'no_optimized']: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot use --verify-optimized with ' b'--no-optimized') | _(b'cannot use --verify-optimized with --no-optimized') | ||||
) | ) | ||||
stagenames = set(n for n, f in stages) | stagenames = set(n for n, f in stages) | ||||
showalways = set() | showalways = set() | ||||
showchanged = set() | showchanged = set() | ||||
if ui.verbose and not opts[b'show_stage']: | if ui.verbose and not opts[b'show_stage']: | ||||
# show parsed tree by --verbose (deprecated) | # show parsed tree by --verbose (deprecated) | ||||
showalways.add(b'parsed') | showalways.add(b'parsed') | ||||
If SOURCE is omitted, the 'default' path will be used. If a URL is given, | If SOURCE is omitted, the 'default' path will be used. If a URL is given, | ||||
that server is used. See :hg:`help urls` for more information. | that server is used. See :hg:`help urls` for more information. | ||||
If the update succeeds, retry the original operation. Otherwise, the cause | If the update succeeds, retry the original operation. Otherwise, the cause | ||||
of the SSL error is likely another issue. | of the SSL error is likely another issue. | ||||
''' | ''' | ||||
if not pycompat.iswindows: | if not pycompat.iswindows: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'certificate chain building is only possible on ' b'Windows') | _(b'certificate chain building is only possible on Windows') | ||||
) | ) | ||||
if not source: | if not source: | ||||
if not repo: | if not repo: | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b"there is no Mercurial repository here, and no " | b"there is no Mercurial repository here, and no " | ||||
b"server specified" | b"server specified" | ||||
template. | template. | ||||
Use --verbose to print the parsed tree. | Use --verbose to print the parsed tree. | ||||
""" | """ | ||||
revs = None | revs = None | ||||
if opts[r'rev']: | if opts[r'rev']: | ||||
if repo is None: | if repo is None: | ||||
raise error.RepoError( | raise error.RepoError( | ||||
_(b'there is no Mercurial repository here ' b'(.hg not found)') | _(b'there is no Mercurial repository here (.hg not found)') | ||||
) | ) | ||||
revs = scmutil.revrange(repo, opts[r'rev']) | revs = scmutil.revrange(repo, opts[r'rev']) | ||||
props = {} | props = {} | ||||
for d in opts[r'define']: | for d in opts[r'define']: | ||||
try: | try: | ||||
k, v = (e.strip() for e in d.split(b'=', 1)) | k, v = (e.strip() for e in d.split(b'=', 1)) | ||||
if not k or k == b'ui': | if not k or k == b'ui': | ||||
b'ssh2', | b'ssh2', | ||||
): | ): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'invalid value for --peer'), | _(b'invalid value for --peer'), | ||||
hint=_(b'valid values are "raw", "ssh1", and "ssh2"'), | hint=_(b'valid values are "raw", "ssh1", and "ssh2"'), | ||||
) | ) | ||||
if path and opts[b'localssh']: | if path and opts[b'localssh']: | ||||
raise error.Abort( | raise error.Abort(_(b'cannot specify --localssh with an explicit path')) | ||||
_(b'cannot specify --localssh with an explicit ' b'path') | |||||
) | |||||
if ui.interactive(): | if ui.interactive(): | ||||
ui.write(_(b'(waiting for commands on stdin)\n')) | ui.write(_(b'(waiting for commands on stdin)\n')) | ||||
blocks = list(_parsewirelangblocks(ui.fin)) | blocks = list(_parsewirelangblocks(ui.fin)) | ||||
proc = None | proc = None | ||||
stdin = None | stdin = None | ||||
_(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk)) | _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk)) | ||||
) | ) | ||||
batchedcommands = None | batchedcommands = None | ||||
elif action.startswith(b'httprequest '): | elif action.startswith(b'httprequest '): | ||||
if not opener: | if not opener: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot use httprequest without an HTTP ' b'peer') | _(b'cannot use httprequest without an HTTP peer') | ||||
) | ) | ||||
request = action.split(b' ', 2) | request = action.split(b' ', 2) | ||||
if len(request) != 3: | if len(request) != 3: | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'invalid httprequest: expected format is ' | b'invalid httprequest: expected format is ' | ||||
b'"httprequest <method> <path>' | b'"httprequest <method> <path>' |
wc = repo[None]
currentbranch = wc.branch()
movemark = None
if currentbranch in repo.branchmap():
# here, all descendant branch heads are closed
heads = repo.branchheads(currentbranch, closed=True)
assert heads, b"any branch has at least one head"
node = repo.revs(b'max(.::(%ln))', heads).first()
- assert node is not None, (
-     b"any revision has at least " b"one descendant branch head"
- )
+ assert (
+     node is not None
+ ), b"any revision has at least one descendant branch head"
if bookmarks.isactivewdirparent(repo):
movemark = repo[b'.'].node()
else:
# here, no "default" branch, and all branches are closed
node = repo.lookup(b'tip')
assert node is not None, b"'tip' exists even in empty repository"
return node, movemark, None
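This destutil hunk is one of the few places where the formatter rearranged code rather than just joining literals: the parentheses now wrap the assert's condition while the message stays outside. That placement matters, because parenthesizing the whole `condition, message` pair would produce a two-element tuple, which is always truthy. A short illustrative sketch, not from the patch:

    node = None

    # Correct: parentheses only around the condition; the assert fires.
    try:
        assert (
            node is not None
        ), b"any revision has at least one descendant branch head"
    except AssertionError:
        pass  # raised as expected

    # Broken variant: a parenthesized (condition, message) is a non-empty
    # tuple, so this assert can never fail (CPython warns about it).
    assert (node is not None, b"message")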
return
self._addpath(f, b'n', 0, -1, -1)
self._map.copymap.pop(f, None)
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
if self._pl[1] == nullid:
raise error.Abort(
- _(b"setting %r to other parent " b"only allowed in merges") % f
+ _(b"setting %r to other parent only allowed in merges") % f
)
if f in self and self[f] == b'n':
# merge-like
self._addpath(f, b'm', 0, -2, -1)
else:
# add-like
self._addpath(f, b'n', 0, -2, -1)
self._map.copymap.pop(f, None)
def _alldirs(self):
return util.dirs(self._map)
def _opendirstatefile(self):
fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
if self._pendingmode is not None and self._pendingmode != mode:
fp.close()
raise error.Abort(
- _(b'working directory state may be ' b'changed parallelly')
+ _(b'working directory state may be changed parallelly')
)
self._pendingmode = mode
return fp
def parents(self):
if not self._parents:
try:
fp = self._opendirstatefile()
st = fp.read(40)
fp.close()
except IOError as err:
if err.errno != errno.ENOENT:
raise
# File doesn't exist, so the current state is empty
st = b''
l = len(st)
if l == 40:
self._parents = (st[:20], st[20:40])
elif l == 0:
self._parents = (nullid, nullid)
else:
raise error.Abort(
- _(b'working directory state appears ' b'damaged!')
+ _(b'working directory state appears damaged!')
)
return self._parents
def setparents(self, p1, p2):
self._parents = (p1, p2)
self._dirtyparents = True
def _opendirstatefile(self):
fp, mode = txnutil.trypending(
self._root, self._opener, self._filename
)
if self._pendingmode is not None and self._pendingmode != mode:
fp.close()
raise error.Abort(
- _(b'working directory state may be ' b'changed parallelly')
+ _(b'working directory state may be changed parallelly')
)
self._pendingmode = mode
return fp
def setparents(self, p1, p2):
self._rustmap.setparents(p1, p2)
self._parents = (p1, p2)
self._dirtyparents = True
raise
# File doesn't exist, so the current state is empty
st = b''
try:
self._parents = self._rustmap.parents(st)
except ValueError:
raise error.Abort(
- _(b'working directory state appears ' b'damaged!')
+ _(b'working directory state appears damaged!')
)
return self._parents
def read(self):
# ignore HG_PENDING because identity is used only for writing
self.identity = util.filestat.frompath(
self._opener.join(self._filename)
if unsyncedheads:
if None in unsyncedheads:
# old remote, no heads data
heads = None
else:
heads = scmutil.nodesummaries(repo, unsyncedheads)
if heads is None:
repo.ui.status(
- _(b"remote has heads that are " b"not known locally\n")
+ _(b"remote has heads that are not known locally\n")
)
elif branch is None:
repo.ui.status(
- _(b"remote has heads that are " b"not known locally: %s\n")
+ _(b"remote has heads that are not known locally: %s\n")
% heads
)
else:
repo.ui.status(
_(
b"remote has heads on branch '%s' that are "
b"not known locally: %s\n"
)
)
elif len(newhs) > len(oldhs):
# remove bookmarked or existing remote heads from the new heads list
dhs = sorted(newhs - nowarnheads - oldhs)
if dhs:
if errormsg is None:
if branch not in (b'default', None):
errormsg = _(
- b"push creates new remote head %s " b"on branch '%s'!"
+ b"push creates new remote head %s on branch '%s'!"
) % (short(dhs[0]), branch)
elif repo[dhs[0]].bookmarks():
errormsg = _(
b"push creates new remote head %s "
b"with bookmark '%s'!"
) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
else:
errormsg = _(b"push creates new remote head %s!") % short(
# confine strings to be passed to i18n.gettext()
cfg = {}
for k in (b'doc', b'help', b'category'):
v = ui.config(b'alias', b'%s:%s' % (name, k), None)
if v is None:
continue
if not encoding.isasciistr(v):
self.badalias = _(
- b"non-ASCII character in alias definition " b"'%s:%s'"
+ b"non-ASCII character in alias definition '%s:%s'"
) % (name, k)
return
cfg[k] = v
self.help = cfg.get(b'help', defaulthelp or b'')
if self.help and self.help.startswith(b"hg " + cmd):
# drop prefix in old-style help lines so hg shows the alias
self.help = self.help[4 + len(cmd) :]
version = b'v2'
else:
raise error.Abort(
_(
b'changegroup version %s does not have '
b'a known bundlespec'
)
% version,
- hint=_(b'try upgrading your Mercurial ' b'client'),
+ hint=_(b'try upgrading your Mercurial client'),
)
elif part.type == b'stream2' and version is None:
# A stream2 part requires to be part of a v2 bundle
requirements = urlreq.unquote(part.params[b'requirements'])
splitted = requirements.split()
params = bundle2._formatrequirementsparams(splitted)
return b'none-v2;stream=v2;%s' % params
if not version:
raise error.Abort(
- _(b'could not identify changegroup version in ' b'bundle')
+ _(b'could not identify changegroup version in bundle')
)
return b'%s-%s' % (comp, version)
elif isinstance(b, streamclone.streamcloneapplier):
requirements = streamclone.readbundle1header(fh)[2]
formatted = bundle2._formatrequirementsparams(requirements)
return b'none-packed1;%s' % formatted
else:
published = repo.revs(b'::%ln - public()', pushop.revs)
if published:
if behavior == b'warn':
ui.warn(
_(b'%i changesets about to be published\n') % len(published)
)
elif behavior == b'confirm':
if ui.promptchoice(
- _(b'push and publish %i changesets (yn)?' b'$$ &Yes $$ &No')
+ _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
% len(published)
):
raise error.Abort(_(b'user quit'))
elif behavior == b'abort':
msg = _(b'push would publish %i changesets') % len(published)
hint = _(
b"use --publish or adjust 'experimental.auto-publish'"
b" config"
return b'delete'
return b'update'
def _abortonsecretctx(pushop, node, b):
"""abort if a given bookmark points to a secret changeset"""
if node and pushop.repo[node].phase() == phases.secret:
raise error.Abort(
- _(b'cannot push bookmark %s as it points to a secret' b' changeset')
- % b
+ _(b'cannot push bookmark %s as it points to a secret changeset') % b
)
def _pushb2bookmarkspart(pushop, bundler):
pushop.stepsdone.add(b'bookmarks')
if not pushop.outbookmarks:
return
if not (
their_heads == [b'force']
or their_heads == heads
or their_heads == [b'hashed', heads_hash]
):
# someone else committed/pushed/unbundled while we
# were transferring data
raise error.PushRaced(
- b'repository changed while %s - ' b'please try again' % context
+ b'repository changed while %s - please try again' % context
)
def unbundle(repo, cg, heads, source, url):
"""Apply a bundle to a repo.
this function makes sure the repo is locked during the application and have
mechanism to check that no push race occurred between the creation of the
# clone!
repo.ui.warn(
_(
b'no compatible clone bundles available on server; '
b'falling back to regular clone\n'
)
)
repo.ui.warn(
- _(b'(you may want to report this to the server ' b'operator)\n')
+ _(b'(you may want to report this to the server operator)\n')
)
return
entries = sortclonebundleentries(repo.ui, entries)
url = entries[0][b'URL']
repo.ui.status(_(b'applying clone bundle from %s\n') % url)
if trypullbundlefromurl(repo.ui, repo, url):
raise error.Abort(
_(b'remote file data missing key: %s') % k
)
if filemeta[b'location'] == b'store':
vfs = repo.svfs
else:
raise error.Abort(
- _(b'invalid location for raw file data: ' b'%s')
+ _(b'invalid location for raw file data: %s')
% filemeta[b'location']
)
bytesremaining = filemeta[b'size']
with vfs.open(filemeta[b'path'], b'wb') as fh:
while True:
try:
ui = repo.ui
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
# Avoid prompting during an in-memory merge since it doesn't support merge
# conflicts.
if fcd.changectx().isinmemory():
raise error.InMemoryMergeConflictsError(
- b'in-memory merge does not ' b'support file conflicts'
+ b'in-memory merge does not support file conflicts'
)
prompts = partextras(labels)
prompts[b'fd'] = uipathfn(fd)
try:
if fco.isabsent():
index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
choice = [b'local', b'other', b'unresolved'][index]
# do we attempt to simplemerge first?
try:
premerge = _toolbool(ui, tool, b"premerge", not binary)
except error.ConfigError:
premerge = _toolstr(ui, tool, b"premerge", b"").lower()
if premerge not in validkeep:
_valid = b', '.join([b"'" + v + b"'" for v in validkeep])
raise error.ConfigError(
- _(b"%s.premerge not valid " b"('%s' is neither boolean nor %s)")
+ _(b"%s.premerge not valid ('%s' is neither boolean nor %s)")
% (tool, premerge, _valid)
)
if premerge:
if premerge == b'keep-merge3':
if not labels:
labels = _defaultconflictlabels
if len(labels) < 3:
labels.append(b'base')
r = simplemerge.simplemerge(ui, fcd, fca, fco, quiet=True, label=labels)
if not r:
ui.debug(b" premerge successful\n")
return 0
if premerge not in validkeep:
# restore from backup and try again
_restorebackup(fcd, back)
return 1 # continue merging
def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if symlink:
repo.ui.warn(
- _(b'warning: internal %s cannot merge symlinks ' b'for %s\n')
+ _(b'warning: internal %s cannot merge symlinks for %s\n')
% (tool, uipathfn(fcd.path()))
)
return False
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(
_(
b'warning: internal %s cannot merge change/delete '
b'conflict for %s\n'
"""
a = _workingpath(repo, fcd)
fd = fcd.path()
from . import context
if isinstance(fcd, context.overlayworkingfilectx):
raise error.InMemoryMergeConflictsError(
- b'in-memory merge does not ' b'support the :dump tool.'
+ b'in-memory merge does not support the :dump tool.'
)
util.writefile(a + b".local", fcd.decodeddata())
repo.wwrite(fd + b".other", fco.data(), fco.flags())
repo.wwrite(fd + b".base", fca.data(), fca.flags())
return False, 1, False
# for now.
#
# It would be possible to run most tools with temporary files, but this
# raises the question of what to do if the user only partially resolves the
# file -- we can't leave a merge state. (Copy to somewhere in the .hg/
# directory and tell the user how to get it is my best idea, but it's
# clunky.)
raise error.InMemoryMergeConflictsError(
- b'in-memory merge does not support ' b'external merge tools'
+ b'in-memory merge does not support external merge tools'
)
def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
tmpl = ui.config(b'ui', b'pre-merge-tool-output-template')
if not tmpl:
return
ui.status(t.renderdefault(props))
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if fcd.isabsent() or fco.isabsent():
repo.ui.warn(
- _(b'warning: %s cannot merge change/delete conflict ' b'for %s\n')
+ _(b'warning: %s cannot merge change/delete conflict for %s\n')
% (tool, uipathfn(fcd.path()))
)
return False, 1, None
unused, unused, unused, back = files
localpath = _workingpath(repo, fcd)
args = _toolstr(repo.ui, tool, b"args")
with _maketempfiles(
ui.status(_(b"merging %s\n") % fduipath)
ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf):
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError(
- b'in-memory merge does ' b'not support merge ' b'conflicts'
+ b'in-memory merge does not support merge conflicts'
)
ui.warn(onfailure % fduipath)
return True, 1, False
back = _makebackup(repo, ui, wctx, fcd, premerge)
files = (None, None, None, back)
r = 1
try:
def _haltmerge():
msg = _(b'merge halted after failed merge (see hg resolve)')
raise error.InterventionRequired(msg)
def _onfilemergefailure(ui):
action = ui.config(b'merge', b'on-failure')
if action == b'prompt':
- msg = _(b'continue merge operation (yn)?' b'$$ &Yes $$ &No')
+ msg = _(b'continue merge operation (yn)?$$ &Yes $$ &No')
if ui.promptchoice(msg, 0) == 1:
_haltmerge()
if action == b'halt':
_haltmerge()
# default action is 'continue', in which case we neither prompt nor halt
def hasconflictmarkers(data):
):
if hasconflictmarkers(fcd.data()):
r = 1
checked = False
if b'prompt' in _toollist(ui, tool, b"check"):
checked = True
if ui.promptchoice(
- _(b"was merge of '%s' successful (yn)?" b"$$ &Yes $$ &No")
+ _(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
% uipathfn(fd),
1,
):
r = 1
if (
not r
and not checked
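Several prompts in this file join a question with its `$$ &Yes $$ &No` tail. In Mercurial's prompt convention the `$$`-separated fields after the question are the available choices, with `&` marking each choice's response key, so merging the literals reproduces exactly the string `ui.promptchoice()` parses. A hedged sketch of that decomposition (`splitchoices` is a hypothetical helper, not Mercurial's parser):

    def splitchoices(prompt):
        # b"question $$ &Yes $$ &No" -> question plus (key, label) pairs
        parts = prompt.split(b'$$')
        question = parts[0].rstrip()
        choices = []
        for p in parts[1:]:
            p = p.strip()
            choices.append((p[1:2].lower(), p.replace(b'&', b'')))
        return question, choices

    q, cs = splitchoices(b"was merge of '%s' successful (yn)?$$ &Yes $$ &No")
    assert q == b"was merge of '%s' successful (yn)?"
    assert cs == [(b'y', b'Yes'), (b'n', b'No')]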
pass
elif ui.verbose:
rst.append(
b'\n%s\n'
% optrst(_(b"global options"), commands.globalopts, ui.verbose)
)
if name == b'shortlist':
rst.append(
- _(b"\n(use 'hg help' for the full list " b"of commands)\n")
+ _(b"\n(use 'hg help' for the full list of commands)\n")
)
else:
if name == b'shortlist':
rst.append(
_(
b"\n(use 'hg help' for the full list of commands "
b"or 'hg -v' for details)\n"
)
)
elif name and not full:
rst.append(
- _(b"\n(use 'hg help %s' to show the full help " b"text)\n")
+ _(b"\n(use 'hg help %s' to show the full help text)\n")
% name
)
elif name and syns and name in syns.keys():
rst.append(
_(
b"\n(use 'hg help -v -e %s' to show built-in "
b"aliases and global options)\n"
)
b'(some details hidden, use --verbose'
b' to show complete help)'
)
indicateomitted(rst, omitted)
try:
cmdutil.findcmd(name, commands.table)
rst.append(
- _(b"\nuse 'hg help -c %s' to see help for " b"the %s command\n")
+ _(b"\nuse 'hg help -c %s' to see help for the %s command\n")
% (name, name)
)
except error.UnknownCommand:
pass
return rst
def helpext(name, subtopic=None):
try:
def helpextcmd(name, subtopic=None):
cmd, ext, doc = extensions.disabledcmd(
ui, name, ui.configbool(b'ui', b'strict')
)
doc = doc.splitlines()[0]
rst = listexts(
- _(b"'%s' is provided by the following " b"extension:") % cmd,
+ _(b"'%s' is provided by the following extension:") % cmd,
{ext: doc},
indent=4,
showdeprecated=True,
)
rst.append(b'\n')
rst.append(
_(
b"(use 'hg help extensions' for information on enabling "
The new repo only has a requirements file and pointer to the source.
This function configures additional shared data.
Extensions can wrap this function and write additional entries to
destrepo/.hg/shared to indicate additional pieces of data to be shared.
"""
default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
if default:
- template = b'[paths]\n' b'default = %s\n'
+ template = b'[paths]\ndefault = %s\n'
destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
with destrepo.wlock():
narrowspec.copytoworkingcopy(destrepo)
def _postshareupdate(repo, update, checkout=None):
"""Maybe perform a working directory update after a shared repo is created.
ms = mergemod.mergestate.read(repo)
if ms.active():
# there were conflicts
node = ms.localctx.hex()
else:
# there were no conficts, mergestate was not stored
node = repo[b'.'].hex()
- repo.ui.status(
-     _(b"aborting the merge, updating back to" b" %s\n") % node[:12]
- )
+ repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
stats = mergemod.update(repo, node, branchmerge=False, force=True)
_showstats(repo, stats)
return stats.unresolvedcount > 0
def _incoming(
displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
def createapp(baseui, repo, webconf):
if webconf:
return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
else:
if not repo:
raise error.RepoError(
- _(b"there is no Mercurial repository" b" here (.hg not found)")
+ _(b"there is no Mercurial repository here (.hg not found)")
)
return hgweb_mod.hgweb(repo, baseui=baseui)
if badheaders:
raise error.ProgrammingError(
b'illegal header on 304 response: %s'
% b', '.join(sorted(badheaders))
)
if self._bodygen is not None or self._bodywillwrite:
raise error.ProgrammingError(
- b"must use setbodybytes('') with " b"304 responses"
+ b"must use setbodybytes('') with 304 responses"
)
# Various HTTP clients (notably httplib) won't read the HTTP response
# until the HTTP request has been sent in full. If servers (us) send a
# response before the HTTP request has been fully sent, the connection
# may deadlock because neither end is reading.
#
# We work around this by "draining" the request data before
self._write(chunk)
if not self.sent_headers:
self.send_headers()
self._done()
def send_headers(self):
if not self.saved_status:
raise AssertionError(
- b"Sending headers before " b"start_response() called"
+ b"Sending headers before start_response() called"
)
saved_status = self.saved_status.split(None, 1)
saved_status[0] = int(saved_status[0])
self.send_response(*saved_status)
self.length = None
self._chunked = False
for h in self.saved_headers:
self.send_header(*h)
)
if encoding:
web.res.headers[b'Content-Encoding'] = encoding
web.res.setbodywillwrite()
if list(web.res.sendresponse()):
raise error.ProgrammingError(
- b'sendresponse() should not emit data ' b'if writing later'
+ b'sendresponse() should not emit data if writing later'
)
bodyfh = web.res.getbodyfile()
archival.archive(
web.repo,
bodyfh,
cnode,
try:
r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args))
except Exception as exc:
if isinstance(exc, error.Abort):
ui.warn(_(b'error: %s hook failed: %s\n') % (hname, exc.args[0]))
else:
ui.warn(
- _(b'error: %s hook raised an exception: ' b'%s\n')
+ _(b'error: %s hook raised an exception: %s\n')
% (hname, stringutil.forcebytestr(exc))
)
if throw:
raise
if not ui.tracebackflag:
ui.warn(_(b'(run with --traceback for stack trace)\n'))
ui.traceback()
return True, True
# Unless we end up supporting CBOR in the legacy wire protocol,
# this should ONLY be encountered for the initial capabilities
# request during handshake.
if subtype == b'cbor':
if allowcbor:
return respurl, proto, resp
else:
raise error.RepoError(
- _(b'unexpected CBOR response from ' b'server')
+ _(b'unexpected CBOR response from server')
)
version_info = tuple([int(n) for n in subtype.split(b'.')])
except ValueError:
raise error.RepoError(
- _(b"'%s' sent a broken Content-Type " b"header (%s)")
- % (safeurl, proto)
+ _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
)
# TODO consider switching to a decompression reader that uses
# generators.
if version_info == (0, 1):
if compressible:
resp = util.compengines[b'zlib'].decompressorreader(resp)
return self
def __exit__(self, exctype, excvalue, exctb):
self.close()
def callcommand(self, command, args):
if self._sent:
raise error.ProgrammingError(
- b'callcommand() cannot be used after ' b'commands are sent'
+ b'callcommand() cannot be used after commands are sent'
)
if self._closed:
raise error.ProgrammingError(
- b'callcommand() cannot be used after ' b'close()'
+ b'callcommand() cannot be used after close()'
)
# The service advertises which commands are available. So if we attempt
# to call an unknown command or pass an unknown argument, we can screen
# for this.
if command not in self._descriptor[b'commands']:
raise error.ProgrammingError(
b'wire protocol command %s is not available' % command
permissions = set(self._neededpermissions)
if b'push' in permissions and b'pull' in permissions:
permissions.remove(b'pull')
if len(permissions) > 1:
raise error.RepoError(
- _(b'cannot make request requiring multiple ' b'permissions: %s')
+ _(b'cannot make request requiring multiple permissions: %s')
% _(b', ').join(sorted(permissions))
)
permission = {b'push': b'rw', b'pull': b'ro',}[permissions.pop()]
handler, resp = sendv2request(
self._ui,
self._opener,
def instance(ui, path, create, intents=None, createopts=None):
if create:
raise error.Abort(_(b'cannot create new http repository'))
try:
if path.startswith(b'https:') and not urlmod.has_https:
raise error.Abort(
- _(b'Python support for SSL and HTTPS ' b'is not installed')
+ _(b'Python support for SSL and HTTPS is not installed')
)
inst = makepeer(ui, path)
return inst
except error.RepoError as httpexception:
try:
r = statichttprepo.instance(ui, b"static-" + path, create)
ui.note(_(b'(falling back to static-http)\n'))
return r
except error.RepoError:
raise httpexception # use the original http RepoError instead
# first. We previously got into a nasty loop | # first. We previously got into a nasty loop | ||||
# where an exception was uncaught, and so the | # where an exception was uncaught, and so the | ||||
# connection stayed open. On the next try, the | # connection stayed open. On the next try, the | ||||
# same exception was raised, etc. The trade-off is | # same exception was raised, etc. The trade-off is | ||||
# that it's now possible this call will raise | # that it's now possible this call will raise | ||||
# a DIFFERENT exception | # a DIFFERENT exception | ||||
if DEBUG: | if DEBUG: | ||||
DEBUG.error( | DEBUG.error( | ||||
b"unexpected exception - closing " b"connection to %s (%d)", | b"unexpected exception - closing connection to %s (%d)", | ||||
host, | host, | ||||
id(h), | id(h), | ||||
) | ) | ||||
self._cm.remove(h) | self._cm.remove(h) | ||||
h.close() | h.close() | ||||
raise | raise | ||||
if r is None or r.version == 9: | if r is None or r.version == 9: |
return self | return self | ||||
def __exit__(self, exctype, excvalue, exctb): | def __exit__(self, exctype, excvalue, exctb): | ||||
self.close() | self.close() | ||||
def callcommand(self, command, args): | def callcommand(self, command, args): | ||||
if self._sent: | if self._sent: | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'callcommand() cannot be used after ' b'sendcommands()' | b'callcommand() cannot be used after sendcommands()' | ||||
) | ) | ||||
if self._closed: | if self._closed: | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'callcommand() cannot be used after ' b'close()' | b'callcommand() cannot be used after close()' | ||||
) | ) | ||||
# We don't need to support anything fancy. Just call the named | # We don't need to support anything fancy. Just call the named | ||||
# method on the peer and return a resolved future. | # method on the peer and return a resolved future. | ||||
fn = getattr(self._peer, pycompat.sysstr(command)) | fn = getattr(self._peer, pycompat.sysstr(command)) | ||||
f = pycompat.futures.Future() | f = pycompat.futures.Future() | ||||
def lookup(self, key): | def lookup(self, key): | ||||
return self._repo.lookup(key) | return self._repo.lookup(key) | ||||
def pushkey(self, namespace, key, old, new): | def pushkey(self, namespace, key, old, new): | ||||
return self._repo.pushkey(namespace, key, old, new) | return self._repo.pushkey(namespace, key, old, new) | ||||
def stream_out(self): | def stream_out(self): | ||||
raise error.Abort( | raise error.Abort(_(b'cannot perform stream clone against local peer')) | ||||
_(b'cannot perform stream clone against local ' b'peer') | |||||
) | |||||
def unbundle(self, bundle, heads, url): | def unbundle(self, bundle, heads, url): | ||||
"""apply a bundle on a repo | """apply a bundle on a repo | ||||
This function handles the repo locking itself.""" | This function handles the repo locking itself.""" | ||||
try: | try: | ||||
try: | try: | ||||
bundle = exchange.readbundle(self.ui, bundle, None) | bundle = exchange.readbundle(self.ui, bundle, None) | ||||
sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') | sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') | ||||
if b'relshared' in requirements: | if b'relshared' in requirements: | ||||
sharedpath = hgvfs.join(sharedpath) | sharedpath = hgvfs.join(sharedpath) | ||||
sharedvfs = vfsmod.vfs(sharedpath, realpath=True) | sharedvfs = vfsmod.vfs(sharedpath, realpath=True) | ||||
if not sharedvfs.exists(): | if not sharedvfs.exists(): | ||||
raise error.RepoError( | raise error.RepoError( | ||||
_(b'.hg/sharedpath points to nonexistent ' b'directory %s') | _(b'.hg/sharedpath points to nonexistent directory %s') | ||||
% sharedvfs.base | % sharedvfs.base | ||||
) | ) | ||||
features.add(repository.REPO_FEATURE_SHARED_STORAGE) | features.add(repository.REPO_FEATURE_SHARED_STORAGE) | ||||
storebasepath = sharedvfs.base | storebasepath = sharedvfs.base | ||||
cachepath = sharedvfs.join(b'cache') | cachepath = sharedvfs.join(b'cache') | ||||
else: | else: | ||||
def _dirstatevalidate(self, node): | def _dirstatevalidate(self, node): | ||||
try: | try: | ||||
self.changelog.rev(node) | self.changelog.rev(node) | ||||
return node | return node | ||||
except error.LookupError: | except error.LookupError: | ||||
if not self._dirstatevalidatewarned: | if not self._dirstatevalidatewarned: | ||||
self._dirstatevalidatewarned = True | self._dirstatevalidatewarned = True | ||||
self.ui.warn( | self.ui.warn( | ||||
_(b"warning: ignoring unknown" b" working parent %s!\n") | _(b"warning: ignoring unknown working parent %s!\n") | ||||
% short(node) | % short(node) | ||||
) | ) | ||||
return nullid | return nullid | ||||
@storecache(narrowspec.FILENAME) | @storecache(narrowspec.FILENAME) | ||||
def narrowpats(self): | def narrowpats(self): | ||||
"""matcher patterns for this repository's narrowspec | """matcher patterns for this repository's narrowspec | ||||
if detail and ui.verbose: | if detail and ui.verbose: | ||||
msg = _( | msg = _( | ||||
b'repository tip rolled back to revision %d' | b'repository tip rolled back to revision %d' | ||||
b' (undo %s: %s)\n' | b' (undo %s: %s)\n' | ||||
) % (oldtip, desc, detail) | ) % (oldtip, desc, detail) | ||||
else: | else: | ||||
msg = _( | msg = _( | ||||
b'repository tip rolled back to revision %d' b' (undo %s)\n' | b'repository tip rolled back to revision %d (undo %s)\n' | ||||
) % (oldtip, desc) | ) % (oldtip, desc) | ||||
except IOError: | except IOError: | ||||
msg = _(b'rolling back unknown transaction\n') | msg = _(b'rolling back unknown transaction\n') | ||||
desc = None | desc = None | ||||
if not force and self[b'.'] != self[b'tip'] and desc == b'commit': | if not force and self[b'.'] != self[b'tip'] and desc == b'commit': | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
_( | _( | ||||
b'working directory now based on ' | b'working directory now based on ' | ||||
b'revisions %d and %d\n' | b'revisions %d and %d\n' | ||||
) | ) | ||||
% parents | % parents | ||||
) | ) | ||||
else: | else: | ||||
ui.status( | ui.status( | ||||
_(b'working directory now based on ' b'revision %d\n') | _(b'working directory now based on revision %d\n') % parents | ||||
% parents | |||||
) | ) | ||||
mergemod.mergestate.clean(self, self[b'.'].node()) | mergemod.mergestate.clean(self, self[b'.'].node()) | ||||
# TODO: if we know which new heads may result from this rollback, pass | # TODO: if we know which new heads may result from this rollback, pass | ||||
# them to destroy(), which will prevent the branchhead cache from being | # them to destroy(), which will prevent the branchhead cache from being | ||||
# invalidated. | # invalidated. | ||||
self.destroyed() | self.destroyed() | ||||
return 0 | return 0 | ||||
revisions are known). | revisions are known). | ||||
""" | """ | ||||
createopts = defaultcreateopts(ui, createopts=createopts) | createopts = defaultcreateopts(ui, createopts=createopts) | ||||
unknownopts = filterknowncreateopts(ui, createopts) | unknownopts = filterknowncreateopts(ui, createopts) | ||||
if not isinstance(unknownopts, dict): | if not isinstance(unknownopts, dict): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'filterknowncreateopts() did not return ' b'a dict' | b'filterknowncreateopts() did not return a dict' | ||||
) | ) | ||||
if unknownopts: | if unknownopts: | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'unable to create repository because of unknown ' | b'unable to create repository because of unknown ' | ||||
b'creation option: %s' | b'creation option: %s' | ||||
) | ) | ||||
# But we have to allow the close() method because some constructors | # But we have to allow the close() method because some constructors | ||||
# of repos call close() on repo references. | # of repos call close() on repo references. | ||||
class poisonedrepository(object): | class poisonedrepository(object): | ||||
def __getattribute__(self, item): | def __getattribute__(self, item): | ||||
if item == r'close': | if item == r'close': | ||||
return object.__getattribute__(self, item) | return object.__getattribute__(self, item) | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'repo instances should not be used ' b'after unshare' | b'repo instances should not be used after unshare' | ||||
) | ) | ||||
def close(self): | def close(self): | ||||
pass | pass | ||||
# We may have a repoview, which intercepts __setattr__. So be sure | # We may have a repoview, which intercepts __setattr__. So be sure | ||||
# we operate at the lowest level possible. | # we operate at the lowest level possible. | ||||
object.__setattr__(repo, r'__class__', poisonedrepository) | object.__setattr__(repo, r'__class__', poisonedrepository) |
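The class swap above is what makes stale references fail loudly after unshare; only close() is whitelisted. A distilled sketch of the same pattern, with generic names that are not from this codebase:

    class Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('instance should not be used after unshare')

        def close(self):
            pass

    class Repo(object):
        def lookup(self):
            return 42

    repo = Repo()
    object.__setattr__(repo, '__class__', Poisoned)  # bypass any __setattr__ hook
    repo.close()       # still allowed
    # repo.lookup()    # would now raise RuntimeError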
""" | """ | ||||
wctx = repo[None] | wctx = repo[None] | ||||
# Two-levels map of "rev -> file ctx -> [line range]". | # Two-levels map of "rev -> file ctx -> [line range]". | ||||
linerangesbyrev = {} | linerangesbyrev = {} | ||||
for fname, (fromline, toline) in _parselinerangeopt(repo, opts): | for fname, (fromline, toline) in _parselinerangeopt(repo, opts): | ||||
if fname not in wctx: | if fname not in wctx: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot follow file not in parent ' b'revision: "%s"') | _(b'cannot follow file not in parent revision: "%s"') % fname | ||||
% fname | |||||
) | ) | ||||
fctx = wctx.filectx(fname) | fctx = wctx.filectx(fname) | ||||
for fctx, linerange in dagop.blockancestors(fctx, fromline, toline): | for fctx, linerange in dagop.blockancestors(fctx, fromline, toline): | ||||
rev = fctx.introrev() | rev = fctx.introrev() | ||||
if rev not in userrevs: | if rev not in userrevs: | ||||
continue | continue | ||||
linerangesbyrev.setdefault(rev, {}).setdefault( | linerangesbyrev.setdefault(rev, {}).setdefault( | ||||
fctx.path(), [] | fctx.path(), [] |
_( | _( | ||||
b'smtp specified as email transport, ' | b'smtp specified as email transport, ' | ||||
b'but no smtp host configured' | b'but no smtp host configured' | ||||
) | ) | ||||
) | ) | ||||
else: | else: | ||||
if not procutil.findexe(method): | if not procutil.findexe(method): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'%r specified as email transport, ' b'but not in PATH') | _(b'%r specified as email transport, but not in PATH') % method | ||||
% method | |||||
) | ) | ||||
def codec2iana(cs): | def codec2iana(cs): | ||||
'''''' | '''''' | ||||
cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower()) | cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower()) | ||||
# "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1" | # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1" |
self._lazydirs = {} | self._lazydirs = {} | ||||
# Using _lazymanifest here is a little slower than plain old dicts | # Using _lazymanifest here is a little slower than plain old dicts | ||||
self._files = {} | self._files = {} | ||||
self._flags = {} | self._flags = {} | ||||
if text: | if text: | ||||
def readsubtree(subdir, subm): | def readsubtree(subdir, subm): | ||||
raise AssertionError( | raise AssertionError( | ||||
b'treemanifest constructor only accepts ' b'flat manifests' | b'treemanifest constructor only accepts flat manifests' | ||||
) | ) | ||||
self.parse(text, readsubtree) | self.parse(text, readsubtree) | ||||
self._dirty = True # Mark flat manifest dirty after parsing | self._dirty = True # Mark flat manifest dirty after parsing | ||||
def _subpath(self, path): | def _subpath(self, path): | ||||
return self._dir + path | return self._dir + path | ||||
'''Returns the kindpats list with the 'set' patterns expanded to matchers''' | '''Returns the kindpats list with the 'set' patterns expanded to matchers''' | ||||
matchers = [] | matchers = [] | ||||
other = [] | other = [] | ||||
for kind, pat, source in kindpats: | for kind, pat, source in kindpats: | ||||
if kind == b'set': | if kind == b'set': | ||||
if ctx is None: | if ctx is None: | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b"fileset expression with no " b"context" | b"fileset expression with no context" | ||||
) | ) | ||||
matchers.append(ctx.matchfileset(pat, badfn=badfn)) | matchers.append(ctx.matchfileset(pat, badfn=badfn)) | ||||
if listsubrepos: | if listsubrepos: | ||||
for subpath in ctx.substate: | for subpath in ctx.substate: | ||||
sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn) | sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn) | ||||
pm = prefixdirmatcher(subpath, sm, badfn=badfn) | pm = prefixdirmatcher(subpath, sm, badfn=badfn) | ||||
matchers.append(pm) | matchers.append(pm) | ||||
self.matchfn | self.matchfn | ||||
) | ) | ||||
return b'<predicatenmatcher pred=%s>' % s | return b'<predicatenmatcher pred=%s>' % s | ||||
def normalizerootdir(dir, funcname): | def normalizerootdir(dir, funcname): | ||||
if dir == b'.': | if dir == b'.': | ||||
util.nouideprecwarn( | util.nouideprecwarn( | ||||
b"match.%s() no longer accepts " b"'.', use '' instead." % funcname, | b"match.%s() no longer accepts '.', use '' instead." % funcname, | ||||
b'5.1', | b'5.1', | ||||
) | ) | ||||
return b'' | return b'' | ||||
return dir | return dir | ||||
class patternmatcher(basematcher): | class patternmatcher(basematcher): | ||||
"""Matches a set of (kind, pat, source) against a 'root' directory. | """Matches a set of (kind, pat, source) against a 'root' directory. |
if v is None: | if v is None: | ||||
v = self.defaults[k] | v = self.defaults[k] | ||||
setattr(self, k, v) | setattr(self, k, v) | ||||
try: | try: | ||||
self.context = int(self.context) | self.context = int(self.context) | ||||
except ValueError: | except ValueError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'diff context lines count must be ' b'an integer, not %r') | _(b'diff context lines count must be an integer, not %r') | ||||
% pycompat.bytestr(self.context) | % pycompat.bytestr(self.context) | ||||
) | ) | ||||
def copy(self, **kwargs): | def copy(self, **kwargs): | ||||
opts = dict((k, getattr(self, k)) for k in self.defaults) | opts = dict((k, getattr(self, k)) for k in self.defaults) | ||||
opts = pycompat.strkwargs(opts) | opts = pycompat.strkwargs(opts) | ||||
opts.update(kwargs) | opts.update(kwargs) | ||||
return diffopts(**opts) | return diffopts(**opts) |
def _getcheckunknownconfig(repo, section, name): | def _getcheckunknownconfig(repo, section, name): | ||||
config = repo.ui.config(section, name) | config = repo.ui.config(section, name) | ||||
valid = [b'abort', b'ignore', b'warn'] | valid = [b'abort', b'ignore', b'warn'] | ||||
if config not in valid: | if config not in valid: | ||||
validstr = b', '.join([b"'" + v + b"'" for v in valid]) | validstr = b', '.join([b"'" + v + b"'" for v in valid]) | ||||
raise error.ConfigError( | raise error.ConfigError( | ||||
_(b"%s.%s not valid " b"('%s' is none of %s)") | _(b"%s.%s not valid ('%s' is none of %s)") | ||||
% (section, name, config, validstr) | % (section, name, config, validstr) | ||||
) | ) | ||||
return config | return config | ||||
def _checkunknownfile(repo, wctx, mctx, f, f2=None): | def _checkunknownfile(repo, wctx, mctx, f, f2=None): | ||||
if wctx.isinmemory(): | if wctx.isinmemory(): | ||||
# Nothing to do in IMM because nothing in the "working copy" can be an | # Nothing to do in IMM because nothing in the "working copy" can be an | ||||
foldmap[fold] = f | foldmap[fold] = f | ||||
# check case-folding of directories | # check case-folding of directories | ||||
foldprefix = unfoldprefix = lastfull = b'' | foldprefix = unfoldprefix = lastfull = b'' | ||||
for fold, f in sorted(foldmap.items()): | for fold, f in sorted(foldmap.items()): | ||||
if fold.startswith(foldprefix) and not f.startswith(unfoldprefix): | if fold.startswith(foldprefix) and not f.startswith(unfoldprefix): | ||||
# the folded prefix matches but actual casing is different | # the folded prefix matches but actual casing is different | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"case-folding collision between " b"%s and directory of %s") | _(b"case-folding collision between %s and directory of %s") | ||||
% (lastfull, f) | % (lastfull, f) | ||||
) | ) | ||||
foldprefix = fold + b'/' | foldprefix = fold + b'/' | ||||
unfoldprefix = f + b'/' | unfoldprefix = f + b'/' | ||||
lastfull = f | lastfull = f | ||||
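The foldprefix/unfoldprefix bookkeeping in this hunk detects a file colliding with a directory once names are case-folded (e.g. README vs readme/notes). A toy rendition of the idea, assuming a plain lower() as the fold:

    def find_collision(files):
        # map case-folded name -> original name, then walk in folded order
        foldmap = {f.lower(): f for f in files}
        foldprefix = unfoldprefix = lastfull = ''
        for fold, f in sorted(foldmap.items()):
            if (foldprefix and fold.startswith(foldprefix)
                    and not f.startswith(unfoldprefix)):
                # folded prefix matches but real casing differs: collision
                return lastfull, f
            foldprefix, unfoldprefix, lastfull = fold + '/', f + '/', f
        return None

    assert find_collision(['README', 'readme/notes']) == ('README', 'readme/notes')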
def driverpreprocess(repo, ms, wctx, labels=None): | def driverpreprocess(repo, ms, wctx, labels=None): | ||||
del actions[f] # merge does not affect file | del actions[f] # merge does not affect file | ||||
elif action[0] in nonconflicttypes: | elif action[0] in nonconflicttypes: | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'merge affects file \'%s\' outside narrow, ' | b'merge affects file \'%s\' outside narrow, ' | ||||
b'which is not yet supported' | b'which is not yet supported' | ||||
) | ) | ||||
% f, | % f, | ||||
hint=_(b'merging in the other direction ' b'may work'), | hint=_(b'merging in the other direction may work'), | ||||
) | ) | ||||
else: | else: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'conflict in file \'%s\' is outside ' b'narrow clone') % f | _(b'conflict in file \'%s\' is outside narrow clone') % f | ||||
) | ) | ||||
def manifestmerge( | def manifestmerge( | ||||
repo, | repo, | ||||
wctx, | wctx, | ||||
p2, | p2, | ||||
pa, | pa, | ||||
# the ordering is important here -- ms.mergedriver will raise if the merge | # the ordering is important here -- ms.mergedriver will raise if the merge | ||||
# driver has changed, and we want to be able to bypass it when overwrite is | # driver has changed, and we want to be able to bypass it when overwrite is | ||||
# True | # True | ||||
usemergedriver = not overwrite and mergeactions and ms.mergedriver | usemergedriver = not overwrite and mergeactions and ms.mergedriver | ||||
if usemergedriver: | if usemergedriver: | ||||
if wctx.isinmemory(): | if wctx.isinmemory(): | ||||
raise error.InMemoryMergeConflictsError( | raise error.InMemoryMergeConflictsError( | ||||
b"in-memory merge does not " b"support mergedriver" | b"in-memory merge does not support mergedriver" | ||||
) | ) | ||||
ms.commit() | ms.commit() | ||||
proceed = driverpreprocess(repo, ms, wctx, labels=labels) | proceed = driverpreprocess(repo, ms, wctx, labels=labels) | ||||
# the driver might leave some files unresolved | # the driver might leave some files unresolved | ||||
unresolvedf = set(ms.unresolved()) | unresolvedf = set(ms.unresolved()) | ||||
if not proceed: | if not proceed: | ||||
# XXX setting unresolved to at least 1 is a hack to make sure we | # XXX setting unresolved to at least 1 is a hack to make sure we | ||||
# error out | # error out | ||||
b"merging with a working directory ancestor" | b"merging with a working directory ancestor" | ||||
b" has no effect" | b" has no effect" | ||||
) | ) | ||||
) | ) | ||||
elif pas == [p1]: | elif pas == [p1]: | ||||
if not mergeancestor and wc.branch() == p2.branch(): | if not mergeancestor and wc.branch() == p2.branch(): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"nothing to merge"), | _(b"nothing to merge"), | ||||
hint=_(b"use 'hg update' " b"or check 'hg heads'"), | hint=_(b"use 'hg update' or check 'hg heads'"), | ||||
) | ) | ||||
if not force and (wc.files() or wc.deleted()): | if not force and (wc.files() or wc.deleted()): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"uncommitted changes"), | _(b"uncommitted changes"), | ||||
hint=_(b"use 'hg status' to list changes"), | hint=_(b"use 'hg status' to list changes"), | ||||
) | ) | ||||
if not wc.isinmemory(): | if not wc.isinmemory(): | ||||
for s in sorted(wc.substate): | for s in sorted(wc.substate): |
from .i18n import _ | from .i18n import _ | ||||
from . import error | from . import error | ||||
def checkunresolved(ms): | def checkunresolved(ms): | ||||
if list(ms.unresolved()): | if list(ms.unresolved()): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"unresolved merge conflicts " b"(see 'hg help resolve')") | _(b"unresolved merge conflicts (see 'hg help resolve')") | ||||
) | ) | ||||
if ms.mdstate() != b's' or list(ms.driverresolved()): | if ms.mdstate() != b's' or list(ms.driverresolved()): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'driver-resolved merge conflicts'), | _(b'driver-resolved merge conflicts'), | ||||
hint=_(b'run "hg resolve --all" to resolve'), | hint=_(b'run "hg resolve --all" to resolve'), | ||||
) | ) |
``rootfilesin:``. | ``rootfilesin:``. | ||||
This function should be used to validate internal data structures | This function should be used to validate internal data structures | ||||
and patterns that are loaded from sources that use the internal, | and patterns that are loaded from sources that use the internal, | ||||
prefixed pattern representation (but can't necessarily be fully trusted). | prefixed pattern representation (but can't necessarily be fully trusted). | ||||
""" | """ | ||||
if not isinstance(pats, set): | if not isinstance(pats, set): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'narrow patterns should be a set; ' b'got %r' % pats | b'narrow patterns should be a set; got %r' % pats | ||||
) | ) | ||||
for pat in pats: | for pat in pats: | ||||
if not pat.startswith(VALID_PREFIXES): | if not pat.startswith(VALID_PREFIXES): | ||||
# Use a Mercurial exception because this can happen due to user | # Use a Mercurial exception because this can happen due to user | ||||
# bugs (e.g. manually updating spec file). | # bugs (e.g. manually updating spec file). | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'invalid prefix on narrow pattern: %s') % pat, | _(b'invalid prefix on narrow pattern: %s') % pat, |
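For context, the prefixes being validated here are the internal path:/rootfilesin: forms named in the docstring. A compressed sketch of the same check; the prefix tuple is assumed to mirror the surrounding module:

    VALID_PREFIXES = (b'path:', b'rootfilesin:')  # assumed from narrowspec

    def validate(pats):
        # pats must be a set of prefixed byte-string patterns
        if not isinstance(pats, set):
            raise TypeError('narrow patterns should be a set; got %r' % (pats,))
        for pat in pats:
            if not pat.startswith(VALID_PREFIXES):  # tuple form of startswith
                raise ValueError('invalid prefix on narrow pattern: %r' % (pat,))

    validate({b'path:src', b'rootfilesin:docs'})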
def add(self, transaction, markers): | def add(self, transaction, markers): | ||||
"""Add new markers to the store | """Add new markers to the store | ||||
Take care of filtering duplicates. | Take care of filtering duplicates. | ||||
Return the number of new markers.""" | Return the number of new markers.""" | ||||

if self._readonly: | if self._readonly: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'creating obsolete markers is not enabled on ' b'this repo') | _(b'creating obsolete markers is not enabled on this repo') | ||||
) | ) | ||||
known = set() | known = set() | ||||
getsuccessors = self.successors.get | getsuccessors = self.successors.get | ||||
new = [] | new = [] | ||||
for m in markers: | for m in markers: | ||||
if m not in getsuccessors(m[0], ()) and m not in known: | if m not in getsuccessors(m[0], ()) and m not in known: | ||||
known.add(m) | known.add(m) | ||||
new.append(m) | new.append(m) |
filteredmsgtable = { | filteredmsgtable = { | ||||
b"pruned": _(b"hidden revision '%s' is pruned"), | b"pruned": _(b"hidden revision '%s' is pruned"), | ||||
b"diverged": _(b"hidden revision '%s' has diverged"), | b"diverged": _(b"hidden revision '%s' has diverged"), | ||||
b"superseded": _(b"hidden revision '%s' was rewritten as: %s"), | b"superseded": _(b"hidden revision '%s' was rewritten as: %s"), | ||||
b"superseded_split": _(b"hidden revision '%s' was split as: %s"), | b"superseded_split": _(b"hidden revision '%s' was split as: %s"), | ||||
b"superseded_split_several": _( | b"superseded_split_several": _( | ||||
b"hidden revision '%s' was split as: %s and " b"%d more" | b"hidden revision '%s' was split as: %s and %d more" | ||||
), | ), | ||||
} | } | ||||
def _getfilteredreason(repo, changeid, ctx): | def _getfilteredreason(repo, changeid, ctx): | ||||
"""return a human-friendly string on why a obsolete changeset is hidden | """return a human-friendly string on why a obsolete changeset is hidden | ||||
""" | """ | ||||
successors = successorssets(repo, ctx.node()) | successors = successorssets(repo, ctx.node()) |
""" | """ | ||||
poskeys, varkey, keys, optkey = argspec | poskeys, varkey, keys, optkey = argspec | ||||
kwstart = next( | kwstart = next( | ||||
(i for i, x in enumerate(trees) if x and x[0] == keyvaluenode), | (i for i, x in enumerate(trees) if x and x[0] == keyvaluenode), | ||||
len(trees), | len(trees), | ||||
) | ) | ||||
if kwstart < len(poskeys): | if kwstart < len(poskeys): | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b"%(func)s takes at least %(nargs)d positional " b"arguments") | _(b"%(func)s takes at least %(nargs)d positional arguments") | ||||
% {b'func': funcname, b'nargs': len(poskeys)} | % {b'func': funcname, b'nargs': len(poskeys)} | ||||
) | ) | ||||
if not varkey and kwstart > len(poskeys) + len(keys): | if not varkey and kwstart > len(poskeys) + len(keys): | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b"%(func)s takes at most %(nargs)d positional " b"arguments") | _(b"%(func)s takes at most %(nargs)d positional arguments") | ||||
% {b'func': funcname, b'nargs': len(poskeys) + len(keys)} | % {b'func': funcname, b'nargs': len(poskeys) + len(keys)} | ||||
) | ) | ||||
args = util.sortdict() | args = util.sortdict() | ||||
# consume positional arguments | # consume positional arguments | ||||
for k, x in zip(poskeys, trees[:kwstart]): | for k, x in zip(poskeys, trees[:kwstart]): | ||||
args[k] = x | args[k] = x | ||||
if varkey: | if varkey: | ||||
args[varkey] = trees[len(args) : kwstart] | args[varkey] = trees[len(args) : kwstart] | ||||
else: | else: | ||||
for k, x in zip(keys, trees[len(args) : kwstart]): | for k, x in zip(keys, trees[len(args) : kwstart]): | ||||
args[k] = x | args[k] = x | ||||
# remainder should be keyword arguments | # remainder should be keyword arguments | ||||
if optkey: | if optkey: | ||||
args[optkey] = util.sortdict() | args[optkey] = util.sortdict() | ||||
for x in trees[kwstart:]: | for x in trees[kwstart:]: | ||||
if not x or x[0] != keyvaluenode or x[1][0] != keynode: | if not x or x[0] != keyvaluenode or x[1][0] != keynode: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b"%(func)s got an invalid argument") % {b'func': funcname} | _(b"%(func)s got an invalid argument") % {b'func': funcname} | ||||
) | ) | ||||
k = x[1][1] | k = x[1][1] | ||||
if k in keys: | if k in keys: | ||||
d = args | d = args | ||||
elif not optkey: | elif not optkey: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b"%(func)s got an unexpected keyword " b"argument '%(key)s'") | _(b"%(func)s got an unexpected keyword argument '%(key)s'") | ||||
% {b'func': funcname, b'key': k} | % {b'func': funcname, b'key': k} | ||||
) | ) | ||||
else: | else: | ||||
d = args[optkey] | d = args[optkey] | ||||
if k in d: | if k in d: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_( | _( | ||||
b"%(func)s got multiple values for keyword " | b"%(func)s got multiple values for keyword " | ||||
return tuple( | return tuple( | ||||
cls._expand(aliases, t, expanding, cache) for t in tree | cls._expand(aliases, t, expanding, cache) for t in tree | ||||
) | ) | ||||
a, l = r | a, l = r | ||||
if a.error: | if a.error: | ||||
raise error.Abort(a.error) | raise error.Abort(a.error) | ||||
if a in expanding: | if a in expanding: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b'infinite expansion of %(section)s ' b'"%(name)s" detected') | _(b'infinite expansion of %(section)s "%(name)s" detected') | ||||
% {b'section': cls._section, b'name': a.name} | % {b'section': cls._section, b'name': a.name} | ||||
) | ) | ||||
# get cacheable replacement tree by expanding aliases recursively | # get cacheable replacement tree by expanding aliases recursively | ||||
expanding.append(a) | expanding.append(a) | ||||
if a.name not in cache: | if a.name not in cache: | ||||
cache[a.name] = cls._expand( | cache[a.name] = cls._expand( | ||||
aliases, a.replacement, expanding, cache | aliases, a.replacement, expanding, cache | ||||
) | ) |
stringutil, | stringutil, | ||||
) | ) | ||||
stringio = util.stringio | stringio = util.stringio | ||||
gitre = re.compile(br'diff --git a/(.*) b/(.*)') | gitre = re.compile(br'diff --git a/(.*) b/(.*)') | ||||
tabsplitter = re.compile(br'(\t+|[^\t]+)') | tabsplitter = re.compile(br'(\t+|[^\t]+)') | ||||
wordsplitter = re.compile( | wordsplitter = re.compile( | ||||
br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|' b'[^ \ta-zA-Z0-9_\x80-\xff])' | br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])' | ||||
) | ) | ||||
PatchError = error.PatchError | PatchError = error.PatchError | ||||
# public functions | # public functions | ||||
def split(stream): | def split(stream): | ||||
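On the wordsplitter change above: the fix merges what had decayed into a br'...' plus b'...' pair back into a single bytes pattern. The sibling tabsplitter shows how these grouped alternations partition a line; a quick standalone check:

    import re
    tabsplitter = re.compile(br'(\t+|[^\t]+)')
    # findall() returns the captured group: alternating text runs and tab runs
    assert tabsplitter.findall(b'ab\t\tcd') == [b'ab', b'\t\t', b'cd']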
if self.missing: | if self.missing: | ||||
self.rej.append(h) | self.rej.append(h) | ||||
return -1 | return -1 | ||||
if self.exists and self.create: | if self.exists and self.create: | ||||
if self.copysource: | if self.copysource: | ||||
self.ui.warn( | self.ui.warn( | ||||
_(b"cannot create %s: destination already " b"exists\n") | _(b"cannot create %s: destination already exists\n") | ||||
% self.fname | % self.fname | ||||
) | ) | ||||
else: | else: | ||||
self.ui.warn(_(b"file %s already exists\n") % self.fname) | self.ui.warn(_(b"file %s already exists\n") % self.fname) | ||||
self.rej.append(h) | self.rej.append(h) | ||||
return -1 | return -1 | ||||
if isinstance(h, binhunk): | if isinstance(h, binhunk): | ||||
count, | count, | ||||
pluses, | pluses, | ||||
minuses, | minuses, | ||||
) | ) | ||||
) | ) | ||||
if stats: | if stats: | ||||
output.append( | output.append( | ||||
_(b' %d files changed, %d insertions(+), ' b'%d deletions(-)\n') | _(b' %d files changed, %d insertions(+), %d deletions(-)\n') | ||||
% (len(stats), totaladds, totalremoves) | % (len(stats), totaladds, totalremoves) | ||||
) | ) | ||||
return b''.join(output) | return b''.join(output) | ||||
def diffstatui(*args, **kw): | def diffstatui(*args, **kw): | ||||
'''like diffstat(), but yields 2-tuples of (output, label) for | '''like diffstat(), but yields 2-tuples of (output, label) for |
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def lsprofile(ui, fp): | def lsprofile(ui, fp): | ||||
format = ui.config(b'profiling', b'format') | format = ui.config(b'profiling', b'format') | ||||
field = ui.config(b'profiling', b'sort') | field = ui.config(b'profiling', b'sort') | ||||
limit = ui.configint(b'profiling', b'limit') | limit = ui.configint(b'profiling', b'limit') | ||||
climit = ui.configint(b'profiling', b'nested') | climit = ui.configint(b'profiling', b'nested') | ||||
if format not in [b'text', b'kcachegrind']: | if format not in [b'text', b'kcachegrind']: | ||||
ui.warn( | ui.warn(_(b"unrecognized profiling format '%s' - Ignored\n") % format) | ||||
_(b"unrecognized profiling format '%s'" b" - Ignored\n") % format | |||||
) | |||||
format = b'text' | format = b'text' | ||||
try: | try: | ||||
from . import lsprof | from . import lsprof | ||||
except ImportError: | except ImportError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'lsprof not available - install from ' | b'lsprof not available - install from ' |
bytesurl = identity | bytesurl = identity | ||||
open = open | open = open | ||||
delattr = delattr | delattr = delattr | ||||
getattr = getattr | getattr = getattr | ||||
hasattr = hasattr | hasattr = hasattr | ||||
setattr = setattr | setattr = setattr | ||||
# this can't be parsed on Python 3 | # this can't be parsed on Python 3 | ||||
exec(b'def raisewithtb(exc, tb):\n' b' raise exc, None, tb\n') | exec(b'def raisewithtb(exc, tb):\n raise exc, None, tb\n') | ||||
def fsencode(filename): | def fsencode(filename): | ||||
""" | """ | ||||
Partial backport from os.py in Python 3, which only accepts bytes. | Partial backport from os.py in Python 3, which only accepts bytes. | ||||
In Python 2, our paths should only ever be bytes, a unicode path | In Python 2, our paths should only ever be bytes, a unicode path | ||||
indicates a bug. | indicates a bug. | ||||
""" | """ | ||||
if isinstance(filename, str): | if isinstance(filename, str): |
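The exec() above exists because `raise exc, None, tb` is Python 2-only syntax and would be a SyntaxError in any file Python 3 must parse. On Python 3 the same effect is spelled with with_traceback(); a minimal sketch (the helper name is illustrative, not from this module):

    import sys

    def raisewithtb_py3(exc, tb):
        # Python 3 spelling of `raise exc, None, tb`
        raise exc.with_traceback(tb)

    try:
        1 // 0
    except ZeroDivisionError:
        tb = sys.exc_info()[2]
        # raisewithtb_py3(RuntimeError('wrapped'), tb)  # would re-raise with tb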
# affected = descendants(roots(wanted)) | # affected = descendants(roots(wanted)) | ||||
# wanted = revs | # wanted = revs | ||||
revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' | revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )' | ||||
tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) | tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs)) | ||||
notstrip = revs - tostrip | notstrip = revs - tostrip | ||||
if notstrip: | if notstrip: | ||||
nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) | nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) | ||||
ui.warn( | ui.warn( | ||||
_(b'warning: orphaned descendants detected, ' b'not stripping %s\n') | _(b'warning: orphaned descendants detected, not stripping %s\n') | ||||
% nodestr | % nodestr | ||||
) | ) | ||||
return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] | return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] | ||||
class stripcallback(object): | class stripcallback(object): | ||||
"""used as a transaction postclose callback""" | """used as a transaction postclose callback""" | ||||
"""Return a cached repo if available | """Return a cached repo if available | ||||
This function must be called after fork(), where the loader thread | This function must be called after fork(), where the loader thread | ||||
is stopped. Otherwise, the returned repo might be updated by the | is stopped. Otherwise, the returned repo might be updated by the | ||||
loader thread. | loader thread. | ||||
""" | """ | ||||
if self._thread and self._thread.is_alive(): | if self._thread and self._thread.is_alive(): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'cannot obtain cached repo while ' b'loader is active' | b'cannot obtain cached repo while loader is active' | ||||
) | ) | ||||
return self._cache.peek(path, None) | return self._cache.peek(path, None) | ||||
def _mainloop(self): | def _mainloop(self): | ||||
while self._accepting: | while self._accepting: | ||||
# Avoid heavy GC after fork(), which would cancel the benefit of | # Avoid heavy GC after fork(), which would cancel the benefit of | ||||
# COW. We assume that GIL is acquired while GC is underway in the | # COW. We assume that GIL is acquired while GC is underway in the | ||||
# loader thread. If that isn't true, we might have to move | # loader thread. If that isn't true, we might have to move |
nodemap[e[6]] = n | nodemap[e[6]] = n | ||||
n += 1 | n += 1 | ||||
return revlogoldindex(index), nodemap, None | return revlogoldindex(index), nodemap, None | ||||
def packentry(self, entry, node, version, rev): | def packentry(self, entry, node, version, rev): | ||||
if gettype(entry[0]): | if gettype(entry[0]): | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'index entry flags need revlog ' b'version 1') | _(b'index entry flags need revlog version 1') | ||||
) | ) | ||||
e2 = ( | e2 = ( | ||||
getoffset(entry[0]), | getoffset(entry[0]), | ||||
entry[1], | entry[1], | ||||
entry[3], | entry[3], | ||||
entry[4], | entry[4], | ||||
node(entry[5]), | node(entry[5]), | ||||
node(entry[6]), | node(entry[6]), | ||||
# revlog v0 doesn't have flag processors | # revlog v0 doesn't have flag processors | ||||
for flag, processor in pycompat.iteritems( | for flag, processor in pycompat.iteritems( | ||||
opts.get(b'flagprocessors', {}) | opts.get(b'flagprocessors', {}) | ||||
): | ): | ||||
flagutil.insertflagprocessor(flag, processor, self._flagprocessors) | flagutil.insertflagprocessor(flag, processor, self._flagprocessors) | ||||
if self._chunkcachesize <= 0: | if self._chunkcachesize <= 0: | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'revlog chunk cache size %r is not ' b'greater than 0') | _(b'revlog chunk cache size %r is not greater than 0') | ||||
% self._chunkcachesize | % self._chunkcachesize | ||||
) | ) | ||||
elif self._chunkcachesize & (self._chunkcachesize - 1): | elif self._chunkcachesize & (self._chunkcachesize - 1): | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'revlog chunk cache size %r is not a ' b'power of 2') | _(b'revlog chunk cache size %r is not a power of 2') | ||||
% self._chunkcachesize | % self._chunkcachesize | ||||
) | ) | ||||
indexdata = b'' | indexdata = b'' | ||||
self._initempty = True | self._initempty = True | ||||
try: | try: | ||||
with self._indexfp() as f: | with self._indexfp() as f: | ||||
if ( | if ( | ||||
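On the chunk cache validation a few lines up: `size & (size - 1)` is the classic power-of-two test. A power of two has exactly one bit set, so subtracting one flips all lower bits and the AND comes out zero. A sanity check:

    def is_power_of_two(n):
        return n > 0 and (n & (n - 1)) == 0

    assert is_power_of_two(65536)
    assert not is_power_of_two(65535)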
self.version = versionflags | self.version = versionflags | ||||
flags = versionflags & ~0xFFFF | flags = versionflags & ~0xFFFF | ||||
fmt = versionflags & 0xFFFF | fmt = versionflags & 0xFFFF | ||||
if fmt == REVLOGV0: | if fmt == REVLOGV0: | ||||
if flags: | if flags: | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'unknown flags (%#04x) in version %d ' b'revlog %s') | _(b'unknown flags (%#04x) in version %d revlog %s') | ||||
% (flags >> 16, fmt, self.indexfile) | % (flags >> 16, fmt, self.indexfile) | ||||
) | ) | ||||
self._inline = False | self._inline = False | ||||
self._generaldelta = False | self._generaldelta = False | ||||
elif fmt == REVLOGV1: | elif fmt == REVLOGV1: | ||||
if flags & ~REVLOGV1_FLAGS: | if flags & ~REVLOGV1_FLAGS: | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'unknown flags (%#04x) in version %d ' b'revlog %s') | _(b'unknown flags (%#04x) in version %d revlog %s') | ||||
% (flags >> 16, fmt, self.indexfile) | % (flags >> 16, fmt, self.indexfile) | ||||
) | ) | ||||
self._inline = versionflags & FLAG_INLINE_DATA | self._inline = versionflags & FLAG_INLINE_DATA | ||||
self._generaldelta = versionflags & FLAG_GENERALDELTA | self._generaldelta = versionflags & FLAG_GENERALDELTA | ||||
elif fmt == REVLOGV2: | elif fmt == REVLOGV2: | ||||
if flags & ~REVLOGV2_FLAGS: | if flags & ~REVLOGV2_FLAGS: | ||||
raise error.RevlogError( | raise error.RevlogError( | ||||
_(b'unknown flags (%#04x) in version %d ' b'revlog %s') | _(b'unknown flags (%#04x) in version %d revlog %s') | ||||
% (flags >> 16, fmt, self.indexfile) | % (flags >> 16, fmt, self.indexfile) | ||||
) | ) | ||||
self._inline = versionflags & FLAG_INLINE_DATA | self._inline = versionflags & FLAG_INLINE_DATA | ||||
# generaldelta implied by version 2 revlogs. | # generaldelta implied by version 2 revlogs. | ||||
self._generaldelta = True | self._generaldelta = True | ||||
else: | else: | ||||
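For the header checks in this hunk: the revlog header packs the on-disk format number into the low 16 bits and feature flags into the high 16, hence the & 0xFFFF / & ~0xFFFF split. An isolated illustration whose constants mirror the masks used above:

    REVLOGV1 = 1
    FLAG_INLINE_DATA = 1 << 16
    versionflags = FLAG_INLINE_DATA | REVLOGV1

    fmt = versionflags & 0xFFFF      # -> 1, the format number
    flags = versionflags & ~0xFFFF   # -> 0x10000, the feature bits
    assert (fmt, flags) == (REVLOGV1, FLAG_INLINE_DATA)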
_(b'cannot censor with version %d revlogs') % self.version | _(b'cannot censor with version %d revlogs') % self.version | ||||
) | ) | ||||
censorrev = self.rev(censornode) | censorrev = self.rev(censornode) | ||||
tombstone = storageutil.packmeta({b'censored': tombstone}, b'') | tombstone = storageutil.packmeta({b'censored': tombstone}, b'') | ||||
if len(tombstone) > self.rawsize(censorrev): | if len(tombstone) > self.rawsize(censorrev): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'censor tombstone must be no longer than ' b'censored data') | _(b'censor tombstone must be no longer than censored data') | ||||
) | ) | ||||
# Rewriting the revlog in place is hard. Our strategy for censoring is | # Rewriting the revlog in place is hard. Our strategy for censoring is | ||||
# to create a new revlog, copy all revisions to it, then replace the | # to create a new revlog, copy all revisions to it, then replace the | ||||
# revlogs on transaction close. | # revlogs on transaction close. | ||||
newindexfile = self.indexfile + b'.tmpcensored' | newindexfile = self.indexfile + b'.tmpcensored' | ||||
newdatafile = self.datafile + b'.tmpcensored' | newdatafile = self.datafile + b'.tmpcensored' |
args = getargsdict(x, b'expectsize', b'set size') | args = getargsdict(x, b'expectsize', b'set size') | ||||
minsize = 0 | minsize = 0 | ||||
maxsize = len(repo) + 1 | maxsize = len(repo) + 1 | ||||
err = b'' | err = b'' | ||||
if b'size' not in args or b'set' not in args: | if b'size' not in args or b'set' not in args: | ||||
raise error.ParseError(_(b'invalid set of arguments')) | raise error.ParseError(_(b'invalid set of arguments')) | ||||
minsize, maxsize = getintrange( | minsize, maxsize = getintrange( | ||||
args[b'size'], | args[b'size'], | ||||
_(b'expectsize requires a size range' b' or a positive integer'), | _(b'expectsize requires a size range or a positive integer'), | ||||
_(b'size range bounds must be integers'), | _(b'size range bounds must be integers'), | ||||
minsize, | minsize, | ||||
maxsize, | maxsize, | ||||
) | ) | ||||
if minsize < 0 or maxsize < 0: | if minsize < 0 or maxsize < 0: | ||||
raise error.ParseError(_(b'negative size')) | raise error.ParseError(_(b'negative size')) | ||||
rev = getset(repo, fullreposet(repo), args[b'set'], order=order) | rev = getset(repo, fullreposet(repo), args[b'set'], order=order) | ||||
if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize): | if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize): | ||||
err = _( | err = _(b'revset size mismatch. expected between %d and %d, got %d') % ( | ||||
b'revset size mismatch.' b' expected between %d and %d, got %d' | minsize, | ||||
) % (minsize, maxsize, len(rev)) | maxsize, | ||||
len(rev), | |||||
) | |||||
elif minsize == maxsize and len(rev) != minsize: | elif minsize == maxsize and len(rev) != minsize: | ||||
err = _(b'revset size mismatch.' b' expected %d, got %d') % ( | err = _(b'revset size mismatch. expected %d, got %d') % ( | ||||
minsize, | minsize, | ||||
len(rev), | len(rev), | ||||
) | ) | ||||
if err: | if err: | ||||
raise error.RepoLookupError(err) | raise error.RepoLookupError(err) | ||||
if order == followorder: | if order == followorder: | ||||
return subset & rev | return subset & rev | ||||
else: | else: | ||||
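The two messages fixed in this hunk belong to the expectsize() revset, which raises RepoLookupError when the evaluated set's size falls outside the requested bounds. A hedged usage sketch, assuming an in-process `repo` object:

    # exact form: fails unless the set has exactly one member
    repo.revs(b'expectsize(heads(), 1)')
    # range form: between 2 and 5 revisions are acceptable
    repo.revs(b'expectsize(0:10, 2:5)')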
:hg:`help revisions.patterns`. | :hg:`help revisions.patterns`. | ||||
""" | """ | ||||
args = getargsdict(x, b'extra', b'label value') | args = getargsdict(x, b'extra', b'label value') | ||||
if b'label' not in args: | if b'label' not in args: | ||||
# i18n: "extra" is a keyword | # i18n: "extra" is a keyword | ||||
raise error.ParseError(_(b'extra takes at least 1 argument')) | raise error.ParseError(_(b'extra takes at least 1 argument')) | ||||
# i18n: "extra" is a keyword | # i18n: "extra" is a keyword | ||||
label = getstring( | label = getstring( | ||||
args[b'label'], _(b'first argument to extra must be ' b'a string') | args[b'label'], _(b'first argument to extra must be a string') | ||||
) | ) | ||||
value = None | value = None | ||||
if b'value' in args: | if b'value' in args: | ||||
# i18n: "extra" is a keyword | # i18n: "extra" is a keyword | ||||
value = getstring( | value = getstring( | ||||
args[b'value'], _(b'second argument to extra must be ' b'a string') | args[b'value'], _(b'second argument to extra must be a string') | ||||
) | ) | ||||
kind, value, matcher = stringutil.stringmatcher(value) | kind, value, matcher = stringutil.stringmatcher(value) | ||||
def _matchvalue(r): | def _matchvalue(r): | ||||
extra = repo[r].extra() | extra = repo[r].extra() | ||||
return label in extra and (value is None or matcher(extra[label])) | return label in extra and (value is None or matcher(extra[label])) | ||||
return subset.filter( | return subset.filter( | ||||
pats.append(value) | pats.append(value) | ||||
elif prefix == b'i:': | elif prefix == b'i:': | ||||
inc.append(value) | inc.append(value) | ||||
elif prefix == b'x:': | elif prefix == b'x:': | ||||
exc.append(value) | exc.append(value) | ||||
elif prefix == b'r:': | elif prefix == b'r:': | ||||
if rev is not None: | if rev is not None: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
b'_matchfiles expected at most one ' b'revision' | b'_matchfiles expected at most one revision' | ||||
) | ) | ||||
if value == b'': # empty means working directory | if value == b'': # empty means working directory | ||||
rev = node.wdirrev | rev = node.wdirrev | ||||
else: | else: | ||||
rev = value | rev = value | ||||
elif prefix == b'd:': | elif prefix == b'd:': | ||||
if default is not None: | if default is not None: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
b'_matchfiles expected at most one ' b'default mode' | b'_matchfiles expected at most one default mode' | ||||
) | ) | ||||
default = value | default = value | ||||
else: | else: | ||||
raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix) | raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix) | ||||
if not default: | if not default: | ||||
default = b'glob' | default = b'glob' | ||||
hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc) | hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc) | ||||
revs = getset(repo, fullreposet(repo), l[0]) | revs = getset(repo, fullreposet(repo), l[0]) | ||||
fieldlist = [b'metadata'] | fieldlist = [b'metadata'] | ||||
if len(l) > 1: | if len(l) > 1: | ||||
fieldlist = getstring( | fieldlist = getstring( | ||||
l[1], | l[1], | ||||
# i18n: "matching" is a keyword | # i18n: "matching" is a keyword | ||||
_(b"matching requires a string " b"as its second argument"), | _(b"matching requires a string as its second argument"), | ||||
).split() | ).split() | ||||
# Make sure that there are no repeated fields, | # Make sure that there are no repeated fields, | ||||
# expand the 'special' 'metadata' field type | # expand the 'special' 'metadata' field type | ||||
# and check the 'files' whenever we check the 'diff' | # and check the 'files' whenever we check the 'diff' | ||||
fields = [] | fields = [] | ||||
for field in fieldlist: | for field in fieldlist: | ||||
if field == b'metadata': | if field == b'metadata': | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b"unknown sort key %r") % pycompat.bytestr(fk) | _(b"unknown sort key %r") % pycompat.bytestr(fk) | ||||
) | ) | ||||
keyflags.append((k, reverse)) | keyflags.append((k, reverse)) | ||||
if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags): | if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags): | ||||
# i18n: "topo" is a keyword | # i18n: "topo" is a keyword | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b'topo sort order cannot be combined ' b'with other sort keys') | _(b'topo sort order cannot be combined with other sort keys') | ||||
) | ) | ||||
opts = {} | opts = {} | ||||
if b'topo.firstbranch' in args: | if b'topo.firstbranch' in args: | ||||
if any(k == b'topo' for k, reverse in keyflags): | if any(k == b'topo' for k, reverse in keyflags): | ||||
opts[b'topo.firstbranch'] = args[b'topo.firstbranch'] | opts[b'topo.firstbranch'] = args[b'topo.firstbranch'] | ||||
else: | else: | ||||
# i18n: "topo" and "topo.firstbranch" are keywords | # i18n: "topo" and "topo.firstbranch" are keywords |
return interactivecommitfunc if interactive else commitfunc | return interactivecommitfunc if interactive else commitfunc | ||||
def _nothingtoshelvemessaging(ui, repo, pats, opts): | def _nothingtoshelvemessaging(ui, repo, pats, opts): | ||||
stat = repo.status(match=scmutil.match(repo[None], pats, opts)) | stat = repo.status(match=scmutil.match(repo[None], pats, opts)) | ||||
if stat.deleted: | if stat.deleted: | ||||
ui.status( | ui.status( | ||||
_(b"nothing changed (%d missing files, see " b"'hg status')\n") | _(b"nothing changed (%d missing files, see 'hg status')\n") | ||||
% len(stat.deleted) | % len(stat.deleted) | ||||
) | ) | ||||
else: | else: | ||||
ui.status(_(b"nothing changed\n")) | ui.status(_(b"nothing changed\n")) | ||||
def _shelvecreatedcommit(repo, node, name, match): | def _shelvecreatedcommit(repo, node, name, match): | ||||
info = {b'node': nodemod.hex(node)} | info = {b'node': nodemod.hex(node)} | ||||
listcmd(ui, repo, pats, opts) | listcmd(ui, repo, pats, opts) | ||||
def checkparents(repo, state): | def checkparents(repo, state): | ||||
"""check parent while resuming an unshelve""" | """check parent while resuming an unshelve""" | ||||
if state.parents != repo.dirstate.parents(): | if state.parents != repo.dirstate.parents(): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'working directory parents do not match unshelve ' b'state') | _(b'working directory parents do not match unshelve state') | ||||
) | ) | ||||
def _loadshelvedstate(ui, repo, opts): | def _loadshelvedstate(ui, repo, opts): | ||||
try: | try: | ||||
state = shelvedstate.load(repo) | state = shelvedstate.load(repo) | ||||
if opts.get(b'keep') is None: | if opts.get(b'keep') is None: | ||||
opts[b'keep'] = state.keep | opts[b'keep'] = state.keep |
return set(), set(), set() | return set(), set(), set() | ||||
raw = repo.vfs.tryread(b'sparse') | raw = repo.vfs.tryread(b'sparse') | ||||
if not raw: | if not raw: | ||||
return set(), set(), set() | return set(), set(), set() | ||||
if rev is None: | if rev is None: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot parse sparse patterns from working ' b'directory') | _(b'cannot parse sparse patterns from working directory') | ||||
) | ) | ||||
includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') | includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse') | ||||
ctx = repo[rev] | ctx = repo[rev] | ||||
if profiles: | if profiles: | ||||
visited = set() | visited = set() | ||||
while profiles: | while profiles: | ||||
for f in pending: | for f in pending: | ||||
if not sparsematch(f): | if not sparsematch(f): | ||||
repo.ui.warn(_(b"pending changes to '%s'\n") % f) | repo.ui.warn(_(b"pending changes to '%s'\n") % f) | ||||
abort = not force | abort = not force | ||||
if abort: | if abort: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'could not update sparseness due to pending ' b'changes') | _(b'could not update sparseness due to pending changes') | ||||
) | ) | ||||
# Calculate actions | # Calculate actions | ||||
dirstate = repo.dirstate | dirstate = repo.dirstate | ||||
ctx = repo[b'.'] | ctx = repo[b'.'] | ||||
added = [] | added = [] | ||||
lookup = [] | lookup = [] | ||||
dropped = [] | dropped = [] |
# of <alg>:<fingerprint> strings. | # of <alg>:<fingerprint> strings. | ||||
fingerprints = ui.configlist( | fingerprints = ui.configlist( | ||||
b'hostsecurity', b'%s:fingerprints' % bhostname | b'hostsecurity', b'%s:fingerprints' % bhostname | ||||
) | ) | ||||
for fingerprint in fingerprints: | for fingerprint in fingerprints: | ||||
if not (fingerprint.startswith((b'sha1:', b'sha256:', b'sha512:'))): | if not (fingerprint.startswith((b'sha1:', b'sha256:', b'sha512:'))): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'invalid fingerprint for %s: %s') % (bhostname, fingerprint), | _(b'invalid fingerprint for %s: %s') % (bhostname, fingerprint), | ||||
hint=_(b'must begin with "sha1:", "sha256:", ' b'or "sha512:"'), | hint=_(b'must begin with "sha1:", "sha256:", or "sha512:"'), | ||||
) | ) | ||||
alg, fingerprint = fingerprint.split(b':', 1) | alg, fingerprint = fingerprint.split(b':', 1) | ||||
fingerprint = fingerprint.replace(b':', b'').lower() | fingerprint = fingerprint.replace(b':', b'').lower() | ||||
s[b'certfingerprints'].append((alg, fingerprint)) | s[b'certfingerprints'].append((alg, fingerprint)) | ||||
# Fingerprints from [hostfingerprints] are always SHA-1. | # Fingerprints from [hostfingerprints] are always SHA-1. | ||||
for fingerprint in ui.configlist(b'hostfingerprints', bhostname): | for fingerprint in ui.configlist(b'hostfingerprints', bhostname): | ||||
# only (as opposed to multiple versions). So the method for | # only (as opposed to multiple versions). So the method for | ||||
# supporting multiple TLS versions is to use PROTOCOL_SSLv23 and | # supporting multiple TLS versions is to use PROTOCOL_SSLv23 and | ||||
# disable protocols via SSLContext.options and OP_NO_* constants. | # disable protocols via SSLContext.options and OP_NO_* constants. | ||||
# However, SSLContext.options doesn't work unless we have the | # However, SSLContext.options doesn't work unless we have the | ||||
# full/real SSLContext available to us. | # full/real SSLContext available to us. | ||||
if supportedprotocols == {b'tls1.0'}: | if supportedprotocols == {b'tls1.0'}: | ||||
if protocol != b'tls1.0': | if protocol != b'tls1.0': | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'current Python does not support protocol ' b'setting %s') | _(b'current Python does not support protocol setting %s') | ||||
% protocol, | % protocol, | ||||
hint=_( | hint=_( | ||||
b'upgrade Python or disable setting since ' | b'upgrade Python or disable setting since ' | ||||
b'only TLS 1.0 is supported' | b'only TLS 1.0 is supported' | ||||
), | ), | ||||
) | ) | ||||
return ssl.PROTOCOL_TLSv1, 0, b'tls1.0' | return ssl.PROTOCOL_TLSv1, 0, b'tls1.0' | ||||
    Typically ``cafile`` is only defined if ``requireclientcert`` is true.
    """
    # This function is not used much by core Mercurial, so the error messaging
    # doesn't have to be as detailed as for wrapsocket().
    for f in (certfile, keyfile, cafile):
        if f and not os.path.exists(f):
            raise error.Abort(
-               _(b'referenced certificate file (%s) does not ' b'exist') % f
+               _(b'referenced certificate file (%s) does not exist') % f
            )

    protocol, options, _protocolui = protocolsettings(b'tls1.0')

    # This config option is intended for use in tests only. It is a giant
    # footgun to kill security. Don't define it.
    exactprotocol = ui.config(b'devel', b'serverexactprotocol')
    if exactprotocol == b'tls1.0':
[...]
    try:
        peercert = sock.getpeercert(True)
        peercert2 = sock.getpeercert()
    except AttributeError:
        raise error.Abort(_(b'%s ssl connection error') % host)

    if not peercert:
        raise error.Abort(
-           _(b'%s certificate error: ' b'no certificate received') % host
+           _(b'%s certificate error: no certificate received') % host
        )

    if settings[b'disablecertverification']:
        # We don't print the certificate fingerprint because it shouldn't
        # be necessary: if the user requested certificate verification be
        # disabled, they presumably already saw a message about the inability
        # to verify the certificate and this message would have printed the
        # fingerprint. So printing the fingerprint here adds little to no
[...]
            # Pinned fingerprint didn't match. This is a fatal error.
            if settings[b'legacyfingerprint']:
                section = b'hostfingerprint'
                nice = fmtfingerprint(peerfingerprints[b'sha1'])
            else:
                section = b'hostsecurity'
                nice = b'%s:%s' % (hash, fmtfingerprint(peerfingerprints[hash]))
            raise error.Abort(
-               _(b'certificate for %s has unexpected ' b'fingerprint %s')
+               _(b'certificate for %s has unexpected fingerprint %s')
                % (host, nice),
                hint=_(b'check %s configuration') % section,
            )

    # Security is enabled but no CAs are loaded. We can't establish trust
    # for the cert so abort.
    if not sock._hgstate[b'caloaded']:
        raise error.Abort(
    def save(self, version, data):
        """write all the state data stored to .hg/<filename> file

        we use third-party library cbor to serialize data to write in the file.
        """
        if not isinstance(version, int):
            raise error.ProgrammingError(
-               b"version of state file should be" b" an integer"
+               b"version of state file should be an integer"
            )

        with self._repo.vfs(self.fname, b'wb', atomictemp=True) as fp:
            fp.write(b'%d\n' % version)
            for chunk in cborutil.streamencode(data):
                fp.write(chunk)

    def _read(self):
        """reads the state file and returns a dictionary which contain
        data in the same format as it was before storing"""
        with self._repo.vfs(self.fname, b'rb') as fp:
            try:
                int(fp.readline())
            except ValueError:
                raise error.CorruptedState(
-                   b"unknown version of state file" b" found"
+                   b"unknown version of state file found"
                )

            return cborutil.decodeall(fp.read())[0]

    def delete(self):
        """drop the state file if exists"""
        util.unlinkpath(self._repo.vfs.join(self.fname), ignoremissing=True)
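The on-disk format save() and _read() agree on is simple: an ASCII version number, a newline, then a CBOR-encoded payload. A self-contained sketch of the same round trip against a plain file (the statefile.bin path and standalone helpers are illustrative, not Mercurial API):

    from mercurial.utils import cborutil

    def savestate(path, version, data):
        # version line first, then the CBOR body, mirroring save() above
        with open(path, 'wb') as fp:
            fp.write(b'%d\n' % version)
            for chunk in cborutil.streamencode(data):
                fp.write(chunk)

    def readstate(path):
        with open(path, 'rb') as fp:
            version = int(fp.readline())  # ValueError if the header rotted
            return version, cborutil.decodeall(fp.read())[0]

    savestate('statefile.bin', 1, {b'step': b'rebase', b'done': 3})
    assert readstate('statefile.bin') == (1, {b'step': b'rebase', b'done': 3})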
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(
-           _(b'cannot apply stream clone bundle on non-empty ' b'repo')
+           _(b'cannot apply stream clone bundle on non-empty repo')
        )

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(
-           _(b'unable to apply stream clone: ' b'unsupported format: %s')
+           _(b'unable to apply stream clone: unsupported format: %s')
            % b', '.join(sorted(missingreqs))
        )

    consumev1(repo, fp, filecount, bytecount)


class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.
[...]
def applybundlev2(repo, fp, filecount, filesize, requirements):
    from . import localrepo

    missingreqs = [r for r in requirements if r not in repo.supported]
    if missingreqs:
        raise error.Abort(
-           _(b'unable to apply stream clone: ' b'unsupported format: %s')
+           _(b'unable to apply stream clone: unsupported format: %s')
            % b', '.join(sorted(missingreqs))
        )

    consumev2(repo, fp, filecount, filesize)

    # new requirements = old non-format requirements +
    #                    new format-related remote requirements
    # requirements from the streamed-in repository
    repo.requirements = set(requirements) | (
        repo.requirements - repo.supportedformats
    )
    repo.svfs.options = localrepo.resolvestorevfsoptions(
        repo.ui, repo.requirements, repo.features
    )
    repo._writerequirements()
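The set arithmetic above is the whole trick: format requirements must track the streamed-in store, while locally understood non-format requirements survive the swap. A small worked example with invented requirement names:

    # invented names, purely for illustration
    streamed = {b'format-a', b'format-b'}            # from the remote store
    local = {b'format-old', b'local-only-feature'}   # what we had before
    supportedformats = {b'format-a', b'format-b', b'format-old'}

    merged = set(streamed) | (local - supportedformats)
    # format requirements now describe the streamed data; the non-format
    # requirement is carried over untouched
    assert merged == {b'format-a', b'format-b', b'local-only-feature'}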
            newrev = newrev.groups()[0]
            self.ui.status(self._svncommand([b'update', b'-r', newrev])[0])
            return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self.ui.warn(
-               _(b'not removing repo %s because ' b'it has changes.\n')
+               _(b'not removing repo %s because it has changes.\n')
                % self._path
            )
            return
        self.ui.note(_(b'removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            pwvfs = self._ctx.repo().wvfs
[...]
            self.ui.status(
                _(b'pulling subrepo %s from %s\n')
                % (self._relpath, self._gitremote(b'origin'))
            )
            # try only origin: the originally cloned repo
            self._gitcommand([b'fetch'])
        if not self._githavelocally(revision):
            raise error.Abort(
-               _(b'revision %s does not exist in subrepository ' b'"%s"\n')
+               _(b'revision %s does not exist in subrepository "%s"\n')
                % (revision, self._relpath)
            )

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        if self._gitmissing():
            return self._state[1] != b''
        if self._gitisbare():
[...]
                self._gitcommand([b'reset', b'HEAD'])
                cmd.append(b'-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, b'.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(
-               _(b'checking out detached HEAD in ' b'subrepository "%s"\n')
+               _(b'checking out detached HEAD in subrepository "%s"\n')
                % self._relpath
            )
            self.ui.warn(
-               _(b'check out a git branch if you intend ' b'to make changes\n')
+               _(b'check out a git branch if you intend to make changes\n')
            )
            checkout([b'-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
[...]
        return rejected

    @annotatesubrepoerror
    def remove(self):
        if self._gitmissing():
            return
        if self.dirty():
            self.ui.warn(
-               _(b'not removing repo %s because ' b'it has changes.\n')
+               _(b'not removing repo %s because it has changes.\n')
                % self._relpath
            )
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_(b'removing subrepo %s\n') % self._relpath)
        self._gitcommand([b'config', b'core.bare', b'true'])
        for f, kind in self.wvfs.readdir():
    if isinstance(names, bytes):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
        if name in branches:
            repo.ui.warn(
-               _(b"warning: tag %s conflicts with existing" b" branch name\n")
+               _(b"warning: tag %s conflicts with existing branch name\n")
                % name
            )

    def writetags(fp, names, munge, prevtags):
        fp.seek(0, io.SEEK_END)
        if prevtags and not prevtags.endswith(b'\n'):
            fp.write(b'\n')
        for name in names:
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
-       b'revlog content will be recompressed with the new ' b'algorithm.'
+       b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
[...]
            revlogs.discard(UPGRADE_CHANGELOG)
        elif r == b'm':
            revlogs.discard(UPGRADE_MANIFEST)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
-           _(b'cannot upgrade repository; requirement ' b'missing: %s')
+           _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
[...]
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
-           hint=_(b'run without arguments to see valid ' b'optimizations'),
+           hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
                _(
                    b"filename contains '%s', which is reserved "
                    b"on Windows"
                )
                % c
            )
        if ord(c) <= 31:
            return _(
-               b"filename contains '%s', which is invalid " b"on Windows"
+               b"filename contains '%s', which is invalid on Windows"
            ) % stringutil.escapestr(c)
    base = n.split(b'.')[0]
    if base and base.lower() in _winreservednames:
        return (
-           _(b"filename contains '%s', which is reserved " b"on Windows")
+           _(b"filename contains '%s', which is reserved on Windows")
            % base
        )
    t = n[-1:]
    if t in b'. ' and n not in b'..':
        return (
            _(
                b"filename ends with '%s', which is not allowed "
                b"on Windows"
[...]
        self._dirs = {}
        addpath = self.addpath
        if isinstance(map, dict) and skip is not None:
            for f, s in pycompat.iteritems(map):
                if s[0] != skip:
                    addpath(f)
        elif skip is not None:
            raise error.ProgrammingError(
-               b"skip character is only supported " b"with a dict source"
+               b"skip character is only supported with a dict source"
            )
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
[...]
    return fn


def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
-           _(b"stream ended unexpectedly" b" (got %d bytes, expected %d)")
+           _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s
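readexactly() is the building block for parsing fixed-width framing: callers ask for exactly the bytes a field needs and get a clean abort instead of a silent short read. A usage sketch (the frame layout here is invented for illustration, not a Mercurial wire format):

    import struct

    def readframe(stream):
        # hypothetical frame: 4-byte big-endian length, then the payload;
        # a truncated stream surfaces as an explicit error.Abort
        header = readexactly(stream, 4)
        (length,) = struct.unpack('>I', header)
        return readexactly(stream, length)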
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.
                    b, offset + readcount
                )

                if not complete:
                    return False, None, readcount2, SPECIAL_NONE

                if special != SPECIAL_START_ARRAY:
                    raise CBORDecodeError(
-                       b'expected array after finite set ' b'semantic tag'
+                       b'expected array after finite set semantic tag'
                    )

                return True, size, readcount + readcount2 + 1, SPECIAL_START_SET

        else:
            raise CBORDecodeError(b'semantic tag %d not allowed' % tagvalue)

    elif majortype == MAJOR_TYPE_SPECIAL:
[...]
                    )
                elif special in (
                    SPECIAL_START_ARRAY,
                    SPECIAL_START_MAP,
                    SPECIAL_START_SET,
                ):
                    raise CBORDecodeError(
-                       b'collections not supported as map ' b'keys'
+                       b'collections not supported as map keys'
                    )
                # We do not allow special values to be used as map keys.
                else:
                    raise CBORDecodeError(
                        b'unhandled special item when '
                        b'expecting map key: %d' % special
                    )
[...]
                    )
                elif special in (
                    SPECIAL_START_ARRAY,
                    SPECIAL_START_MAP,
                    SPECIAL_START_SET,
                ):
                    raise CBORDecodeError(
-                       b'collections not allowed as set ' b'values'
+                       b'collections not allowed as set values'
                    )
                # We don't allow non-trivial types to exist as set values.
                else:
                    raise CBORDecodeError(
                        b'unhandled special item when '
                        b'expecting set value: %d' % special
                    )
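The restriction these branches enforce mirrors a Python constraint rather than a CBOR one: decoded maps and sets become dict and set objects, and Python requires their keys and members to be hashable. A one-liner showing why a decoded array could never serve as a map key:

    # Lists (what a CBOR array decodes to) are mutable and unhashable,
    # so they cannot be dict keys or set members.
    try:
        {[1, 2]: b'value'}
    except TypeError as e:
        print(e)  # unhashable type: 'list'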
            except pycompat.queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(
-               _(b'can only call close() when context manager ' b'active')
+               _(b'can only call close() when context manager active')
            )

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
class identitydecoder(object):
    """Decoder for the "identity" stream encoding profile."""

    def __init__(self, ui, extraobjs):
        if extraobjs:
            raise error.Abort(
-               _(b'identity decoder received unexpected ' b'additional values')
+               _(b'identity decoder received unexpected additional values')
            )

    def decode(self, data):
        return data


class zlibencoder(object):
    def __init__(self, ui):
[...]
class zlibdecoder(object):
    def __init__(self, ui, extraobjs):
        import zlib

        if extraobjs:
            raise error.Abort(
-               _(b'zlib decoder received unexpected ' b'additional values')
+               _(b'zlib decoder received unexpected additional values')
            )

        self._decompressor = zlib.decompressobj()

    def decode(self, data):
        # Python 2's zlib module doesn't use the buffer protocol and can't
        # handle all bytes-like types.
        if not pycompat.ispy3 and isinstance(data, bytearray):
[...]
    def decode(self, data):
        return self._decompressor.decompress(data)


class zstd8mbdecoder(zstdbasedecoder):
    def __init__(self, ui, extraobjs):
        if extraobjs:
            raise error.Abort(
-               _(b'zstd8mb decoder received unexpected ' b'additional values')
+               _(b'zstd8mb decoder received unexpected additional values')
            )

        super(zstd8mbdecoder, self).__init__(maxwindowsize=8 * 1048576)


# We lazily populate this to avoid excessive module imports when importing
# this module.
STREAM_ENCODERS = {}
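The 8 MB cap zstd8mbdecoder passes down is a denial-of-service guard: a peer cannot force the decoder to allocate an arbitrarily large back-reference window. Assuming the base class wraps the python-zstandard package (which is how I read it), the cap maps onto its max_window_size parameter roughly like this:

    import zstandard  # the python-zstandard package

    # Refuse frames whose declared window exceeds 8 MB; decompressing
    # such a frame raises ZstdError instead of allocating the window.
    dctx = zstandard.ZstdDecompressor(max_window_size=8 * 1048576)
    decobj = dctx.decompressobj()

    def decode(data):
        return decobj.decompress(data)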
            )
            self._incomingstreams[frame.streamid] = inputstream(frame.streamid)

        if frame.streamflags & STREAM_FLAG_ENCODING_APPLIED:
            # TODO handle decoding frames
            self._state = b'errored'
            raise error.ProgrammingError(
-               b'support for decoding stream payloads ' b'not yet implemented'
+               b'support for decoding stream payloads not yet implemented'
            )

        if frame.streamflags & STREAM_FLAG_END_STREAM:
            del self._incomingstreams[frame.streamid]
[...]
        handlers = {
            b'initial': self._onframeinitial,
            b'protocol-settings-receiving': self._onframeprotocolsettings,
        return b'error', {b'message': msg,}

    def _makeruncommandresult(self, requestid):
        entry = self._receivingcommands[requestid]

        if not entry[b'requestdone']:
            self._state = b'errored'
            raise error.ProgrammingError(
-               b'should not be called without ' b'requestdone set'
+               b'should not be called without requestdone set'
            )

        del self._receivingcommands[requestid]

        if self._receivingcommands:
            self._state = b'command-receiving'
        else:
            self._state = b'idle'
[...]
        if frame.flags & FLAG_COMMAND_DATA_CONTINUATION:
            return self._makewantframeresult()
        elif frame.flags & FLAG_COMMAND_DATA_EOS:
            entry[b'data'].seek(0)
            return self._makeruncommandresult(frame.requestid)
        else:
            self._state = b'errored'
-           return self._makeerrorresult(
-               _(b'command data frame without ' b'flags')
-           )
+           return self._makeerrorresult(_(b'command data frame without flags'))

    def _onframeerrored(self, frame):
        return self._makeerrorresult(_(b'server already errored'))


class commandrequest(object):
    """Represents a request to run a command."""
[...]
        )

        if self._buffersends:
            self._pendingrequests.append(request)
            return request, b'noop', {}
        else:
            if not self._cansend:
                raise error.ProgrammingError(
-                   b'sends cannot be performed on ' b'this instance'
+                   b'sends cannot be performed on this instance'
                )

            if not self._hasmultiplesend:
                self._cansend = False
                self._canissuecommands = False

            return (
                request,
[...]
        If instances aren't configured for multiple sends, no new command
        requests are allowed after this is called.
        """
        if not self._pendingrequests:
            return b'noop', {}

        if not self._cansend:
            raise error.ProgrammingError(
-               b'sends cannot be performed on this ' b'instance'
+               b'sends cannot be performed on this instance'
            )

        # If the instance only allows sending once, mark that we have fired
        # our one shot.
        if not self._hasmultiplesend:
            self._canissuecommands = False
            self._cansend = False
[...]
            # It looks like a protocol upgrade request. Transition state to
            # handle it.
            if request.startswith(b'upgrade '):
                if protoswitched:
                    _sshv1respondooberror(
                        fout,
                        ui.ferr,
-                       b'cannot upgrade protocols multiple ' b'times',
+                       b'cannot upgrade protocols multiple times',
                    )
                    state = b'shutdown'
                    continue

                state = b'upgrade-initial'
                continue

            available = wireprotov1server.commands.commandavailable(
[...]
            ok = True

            for line in (b'hello', b'between', b'pairs 81'):
                request = fin.readline()[:-1]

                if request != line:
                    _sshv1respondooberror(
                        fout,
                        ui.ferr,
-                       b'malformed handshake protocol: ' b'missing %s' % line,
+                       b'malformed handshake protocol: missing %s' % line,
                    )
                    ok = False
                    state = b'shutdown'
                    break

            if not ok:
                continue
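For context on the b'pairs 81' literal: after an upgrade request the client replays the legacy SSH handshake, and (as I understand the v1 protocol) the between command's argument is a null-node pair, i.e. two 40-character hex node ids joined by a dash, hence exactly 81 bytes. Roughly, the server expects to read:

    # sketch of the byte stream a client emits after 'upgrade <token> ...'
    expected = (
        b'hello\n'
        b'between\n'
        b'pairs 81\n'
        + b'0' * 40 + b'-' + b'0' * 40  # 81 bytes: null node, '-', null node
    )
    assert len(b'0' * 40 + b'-' + b'0' * 40) == 81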
        return self

    def __exit__(self, exctype, excvalee, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
-               b'callcommand() cannot be used ' b'after commands are sent'
+               b'callcommand() cannot be used after commands are sent'
            )

        if self._closed:
            raise error.ProgrammingError(
-               b'callcommand() cannot be used ' b'after close()'
+               b'callcommand() cannot be used after close()'
            )

        # Commands are dispatched through methods on the peer.
        fn = getattr(self._peer, pycompat.sysstr(command), None)

        if not fn:
            raise error.ProgrammingError(
                b'cannot call command %s: method of same name not available '
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _(
-   b'see https://www.mercurial-scm.org/wiki/' b'IncompatibleClient'
+   b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
)
bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)


def clientcompressionsupport(proto):
    """Returns a list of compression methods supported by the client.

    Returns a list of the compression methods supported by the client
[...]
            b'got %s; expected "push" or "pull"' % permission
        )

    if args is None:
        args = b''

    if not isinstance(args, bytes):
        raise error.ProgrammingError(
-           b'arguments for version 1 commands ' b'must be declared as bytes'
+           b'arguments for version 1 commands must be declared as bytes'
        )

    def register(func):
        if name in commands:
            raise error.ProgrammingError(
-               b'%s command already registered ' b'for version 1' % name
+               b'%s command already registered for version 1' % name
            )

        commands[name] = wireprototypes.commandentry(
            func, args=args, transports=transports, permission=permission
        )

        return func

    return register
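For reference while reading these hunks: wireprotocommand is a decorator factory, and a version 1 command declares its argument names as a single space-separated bytes string. A hedged usage sketch (the command name and handler are invented):

    # Hypothetical registration of a version 1 wire command. The v1
    # argument spec is a bytes string of names; the handler receives
    # the parsed arguments positionally by name.
    @wireprotocommand(b'mylookup', b'key', permission=b'pull')
    def mylookup(repo, proto, key):
        # resolve key against the repo, then wrap the payload in a v1
        # response type from wireprototypes
        return wireprototypes.bytesresponse(b'result for %s\n' % key)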
        return

    if req.headers.get(b'Content-Type') != FRAMINGTYPE:
        res.status = b'415 Unsupported Media Type'
        # TODO we should send a response with appropriate media type,
        # since client does Accept it.
        res.headers[b'Content-Type'] = b'text/plain'
        res.setbodybytes(
-           _(b'client MUST send Content-Type header with ' b'value: %s\n')
+           _(b'client MUST send Content-Type header with value: %s\n')
            % FRAMINGTYPE
        )
        return

    _processhttpv2request(ui, repo, req, res, permission, command, proto)


def _processhttpv2reflectrequest(ui, repo, req, res):
[...]
        wirecommand = COMMANDS[command[b'command']]
        assert wirecommand.permission in (b'push', b'pull')

        if authedperm == b'ro' and wirecommand.permission != b'pull':
            # TODO proper error mechanism
            res.status = b'403 Forbidden'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(
-               _(b'insufficient permissions to execute ' b'command: %s')
+               _(b'insufficient permissions to execute command: %s')
                % command[b'command']
            )
            return True

        # TODO should we also call checkperm() here? Maybe not if we're going
        # to overhaul that API. The granted scope from the URL check should
        # be good enough.

    else:
        # Don't allow multiple commands outside of ``multirequest`` URL.
        if issubsequent:
            # TODO proper error mechanism
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(
-               _(b'multiple commands cannot be issued to this ' b'URL')
+               _(b'multiple commands cannot be issued to this URL')
            )
            return True

        if reqcommand != command[b'command']:
            # TODO define proper error mechanism
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(_(b'command in frame must match command in URL'))
            b'got %s; expected "push" or "pull"' % permission
        )

    if args is None:
        args = {}

    if not isinstance(args, dict):
        raise error.ProgrammingError(
-           b'arguments for version 2 commands ' b'must be declared as dicts'
+           b'arguments for version 2 commands must be declared as dicts'
        )

    for arg, meta in args.items():
        if arg == b'*':
            raise error.ProgrammingError(
-               b'* argument name not allowed on ' b'version 2 commands'
+               b'* argument name not allowed on version 2 commands'
            )

        if not isinstance(meta, dict):
            raise error.ProgrammingError(
                b'arguments for version 2 commands '
                b'must declare metadata as a dict'
            )

        meta[b'required'] = b'default' not in meta

        meta.setdefault(b'default', lambda: None)
        meta.setdefault(b'validvalues', None)

    def register(func):
        if name in COMMANDS:
            raise error.ProgrammingError(
-               b'%s command already registered ' b'for version 2' % name
+               b'%s command already registered for version 2' % name
            )

        COMMANDS[name] = wireprototypes.commandentry(
            func,
            args=args,
            transports=transports,
            permission=permission,
            cachekeyfn=cachekeyfn,
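Unlike version 1, a version 2 command describes each argument with a metadata dict, and the default is stored as a factory (lambda: None) rather than a value, so a mutable default is never shared between requests. An illustrative args declaration (the argument names are invented):

    # Hypothetical metadata for a version 2 command with two arguments.
    # b'default' must be a callable producing a fresh value per request;
    # an argument declared without a default is marked required.
    args = {
        b'revisions': {
            b'default': lambda: [],  # fresh list each call, never shared
        },
        b'fields': {
            b'default': lambda: set(),
            b'validvalues': {b'parents', b'revision'},
        },
    }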
    cl = repo.changelog
    clhasnode = cl.hasnode
    seen = set()
    nodes = []

    if not isinstance(revisions, list):
        raise error.WireprotoCommandError(
-           b'revisions must be defined as an ' b'array'
+           b'revisions must be defined as an array'
        )

    for spec in revisions:
        if b'type' not in spec:
            raise error.WireprotoCommandError(
                b'type key not present in revision specifier'
            )