Inheriting from `object` is implicit in Python 3, so this change should
be behaviorally equivalent.
This change was generated via an automated search-and-replace, so there
may be some accidental changes.
durin42 | |
martinvonz | |
Alphare |
hg-reviewers |
Inheriting from `object` is implicit in Python 3, so this change should
be behaviorally equivalent.
This change was generated via an automated search-and-replace, so there
may be some accidental changes.
Automatic diff as part of commit; lint not applicable. |
Automatic diff as part of commit; unit tests not applicable. |
Status | Author | Revision | |
---|---|---|---|
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | D12339 ui: use input() directly | |
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg | ||
Closed | indygreg |
for c in cs: | for c in cs: | ||||
failandwarn = c[-1] | failandwarn = c[-1] | ||||
preparefailandwarn(failandwarn) | preparefailandwarn(failandwarn) | ||||
filters = c[-2] | filters = c[-2] | ||||
preparefilters(filters) | preparefilters(filters) | ||||
class norepeatlogger(object): | class norepeatlogger: | ||||
def __init__(self): | def __init__(self): | ||||
self._lastseen = None | self._lastseen = None | ||||
def log(self, fname, lineno, line, msg, blame): | def log(self, fname, lineno, line, msg, blame): | ||||
"""print error related a to given line of a given file. | """print error related a to given line of a given file. | ||||
The faulty line will also be printed but only once in the case | The faulty line will also be printed but only once in the case | ||||
of multiple errors. | of multiple errors. |
server = subprocess.Popen( | server = subprocess.Popen( | ||||
tonative(cmdline), stdin=subprocess.PIPE, stdout=subprocess.PIPE | tonative(cmdline), stdin=subprocess.PIPE, stdout=subprocess.PIPE | ||||
) | ) | ||||
return server | return server | ||||
class unixconnection(object): | class unixconnection: | ||||
def __init__(self, sockpath): | def __init__(self, sockpath): | ||||
self.sock = sock = socket.socket(socket.AF_UNIX) | self.sock = sock = socket.socket(socket.AF_UNIX) | ||||
sock.connect(sockpath) | sock.connect(sockpath) | ||||
self.stdin = sock.makefile('wb') | self.stdin = sock.makefile('wb') | ||||
self.stdout = sock.makefile('rb') | self.stdout = sock.makefile('rb') | ||||
def wait(self): | def wait(self): | ||||
self.stdin.close() | self.stdin.close() | ||||
self.stdout.close() | self.stdout.close() | ||||
self.sock.close() | self.sock.close() | ||||
class unixserver(object): | class unixserver: | ||||
def __init__(self, sockpath, logpath=None, repopath=None): | def __init__(self, sockpath, logpath=None, repopath=None): | ||||
self.sockpath = sockpath | self.sockpath = sockpath | ||||
cmdline = [b'hg', b'serve', b'--cmdserver', b'unix', b'-a', sockpath] | cmdline = [b'hg', b'serve', b'--cmdserver', b'unix', b'-a', sockpath] | ||||
if repopath: | if repopath: | ||||
cmdline += [b'-R', repopath] | cmdline += [b'-R', repopath] | ||||
if logpath: | if logpath: | ||||
stdout = open(logpath, 'a') | stdout = open(logpath, 'a') | ||||
stderr = subprocess.STDOUT | stderr = subprocess.STDOUT |
def getlen(ui): | def getlen(ui): | ||||
if ui.configbool(b"perf", b"stub", False): | if ui.configbool(b"perf", b"stub", False): | ||||
return lambda x: 1 | return lambda x: 1 | ||||
return len | return len | ||||
class noop(object): | class noop: | ||||
"""dummy context manager""" | """dummy context manager""" | ||||
def __enter__(self): | def __enter__(self): | ||||
pass | pass | ||||
def __exit__(self, *args): | def __exit__(self, *args): | ||||
pass | pass | ||||
if uiformatter: | if uiformatter: | ||||
fm = uiformatter(b'perf', opts) | fm = uiformatter(b'perf', opts) | ||||
else: | else: | ||||
# for "historical portability": | # for "historical portability": | ||||
# define formatter locally, because ui.formatter has been | # define formatter locally, because ui.formatter has been | ||||
# available since 2.2 (or ae5f92e154d3) | # available since 2.2 (or ae5f92e154d3) | ||||
from mercurial import node | from mercurial import node | ||||
class defaultformatter(object): | class defaultformatter: | ||||
"""Minimized composition of baseformatter and plainformatter""" | """Minimized composition of baseformatter and plainformatter""" | ||||
def __init__(self, ui, topic, opts): | def __init__(self, ui, topic, opts): | ||||
self._ui = ui | self._ui = ui | ||||
if ui.debugflag: | if ui.debugflag: | ||||
self.hexfunc = node.hex | self.hexfunc = node.hex | ||||
else: | else: | ||||
self.hexfunc = node.short | self.hexfunc = node.short | ||||
b"missing attribute %s of %s might break assumption" | b"missing attribute %s of %s might break assumption" | ||||
b" of performance measurement" | b" of performance measurement" | ||||
) | ) | ||||
% (name, obj) | % (name, obj) | ||||
) | ) | ||||
origvalue = getattr(obj, _sysstr(name)) | origvalue = getattr(obj, _sysstr(name)) | ||||
class attrutil(object): | class attrutil: | ||||
def set(self, newvalue): | def set(self, newvalue): | ||||
setattr(obj, _sysstr(name), newvalue) | setattr(obj, _sysstr(name), newvalue) | ||||
def restore(self): | def restore(self): | ||||
setattr(obj, _sysstr(name), origvalue) | setattr(obj, _sysstr(name), origvalue) | ||||
return attrutil() | return attrutil() | ||||
fm, | fm, | ||||
totaltime, | totaltime, | ||||
title="total time (%d revs)" % resultcount, | title="total time (%d revs)" % resultcount, | ||||
displayall=displayall, | displayall=displayall, | ||||
) | ) | ||||
fm.end() | fm.end() | ||||
class _faketr(object): | class _faketr: | ||||
def add(s, x, y, z=None): | def add(s, x, y, z=None): | ||||
return None | return None | ||||
def _timeonewrite( | def _timeonewrite( | ||||
ui, | ui, | ||||
orig, | orig, | ||||
source, | source, |
def writeerr(data): | def writeerr(data): | ||||
# write "data" in BYTES into stderr | # write "data" in BYTES into stderr | ||||
sys.stderr.write(data) | sys.stderr.write(data) | ||||
#################### | #################### | ||||
class embeddedmatcher(object): # pytype: disable=ignored-metaclass | class embeddedmatcher: # pytype: disable=ignored-metaclass | ||||
"""Base class to detect embedded code fragments in *.t test script""" | """Base class to detect embedded code fragments in *.t test script""" | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
def __init__(self, desc): | def __init__(self, desc): | ||||
self.desc = desc | self.desc = desc | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
:filename: a name of embedded code, if it is explicitly specified | :filename: a name of embedded code, if it is explicitly specified | ||||
(e.g. "foobar" of "cat >> foobar <<EOF"). | (e.g. "foobar" of "cat >> foobar <<EOF"). | ||||
Otherwise, this is None | Otherwise, this is None | ||||
:starts: line number (1-origin), at which embedded code starts (inclusive) | :starts: line number (1-origin), at which embedded code starts (inclusive) | ||||
:ends: line number (1-origin), at which embedded code ends (exclusive) | :ends: line number (1-origin), at which embedded code ends (exclusive) | ||||
:code: extracted embedded code, which is single-stringified | :code: extracted embedded code, which is single-stringified | ||||
>>> class ambigmatcher(object): | >>> class ambigmatcher: | ||||
... # mock matcher class to examine implementation of | ... # mock matcher class to examine implementation of | ||||
... # "ambiguous matching" corner case | ... # "ambiguous matching" corner case | ||||
... def __init__(self, desc, matchfunc): | ... def __init__(self, desc, matchfunc): | ||||
... self.desc = desc | ... self.desc = desc | ||||
... self.matchfunc = matchfunc | ... self.matchfunc = matchfunc | ||||
... def startsat(self, line): | ... def startsat(self, line): | ||||
... return self.matchfunc(line) | ... return self.matchfunc(line) | ||||
>>> ambig1 = ambigmatcher('ambiguous #1', | >>> ambig1 = ambigmatcher('ambiguous #1', |
self.translator_class = Translator | self.translator_class = Translator | ||||
def translate(self): | def translate(self): | ||||
visitor = self.translator_class(self.document) | visitor = self.translator_class(self.document) | ||||
self.document.walkabout(visitor) | self.document.walkabout(visitor) | ||||
self.output = visitor.astext() | self.output = visitor.astext() | ||||
class Table(object): | class Table: | ||||
def __init__(self): | def __init__(self): | ||||
self._rows = [] | self._rows = [] | ||||
self._options = ['center'] | self._options = ['center'] | ||||
self._tab_char = '\t' | self._tab_char = '\t' | ||||
self._coldefs = [] | self._coldefs = [] | ||||
def new_row(self): | def new_row(self): | ||||
self._rows.append([]) | self._rows.append([]) | ||||
text = '\\&' + text | text = '\\&' + text | ||||
text = text.replace('\n.', '\n\\&.') | text = text.replace('\n.', '\n\\&.') | ||||
self.body.append(text) | self.body.append(text) | ||||
def depart_Text(self, node): | def depart_Text(self, node): | ||||
pass | pass | ||||
def list_start(self, node): | def list_start(self, node): | ||||
class enum_char(object): | class enum_char: | ||||
enum_style = { | enum_style = { | ||||
'bullet': '\\(bu', | 'bullet': '\\(bu', | ||||
'emdash': '\\(em', | 'emdash': '\\(em', | ||||
} | } | ||||
def __init__(self, style): | def __init__(self, style): | ||||
self._style = style | self._style = style | ||||
if 'start' in node: | if 'start' in node: |
nameroot = hgextname.split('.', 1)[0] | nameroot = hgextname.split('.', 1)[0] | ||||
contextroot = globals.get('__name__', '').split('.', 1)[0] | contextroot = globals.get('__name__', '').split('.', 1)[0] | ||||
if nameroot != contextroot: | if nameroot != contextroot: | ||||
raise | raise | ||||
# retry to import with "hgext_" prefix | # retry to import with "hgext_" prefix | ||||
return importfunc(hgextname, globals, *args, **kwargs) | return importfunc(hgextname, globals, *args, **kwargs) | ||||
class _demandmod(object): | class _demandmod: | ||||
"""module demand-loader and proxy | """module demand-loader and proxy | ||||
Specify 1 as 'level' argument at construction, to import module | Specify 1 as 'level' argument at construction, to import module | ||||
relatively. | relatively. | ||||
""" | """ | ||||
def __init__(self, name, globals, locals, level): | def __init__(self, name, globals, locals, level): | ||||
if '.' in name: | if '.' in name: |
"""Make the module load lazily.""" | """Make the module load lazily.""" | ||||
with tracing.log('demandimport %s', module): | with tracing.log('demandimport %s', module): | ||||
if _deactivated or module.__name__ in ignores: | if _deactivated or module.__name__ in ignores: | ||||
self.loader.exec_module(module) | self.loader.exec_module(module) | ||||
else: | else: | ||||
super().exec_module(module) | super().exec_module(module) | ||||
class LazyFinder(object): | class LazyFinder: | ||||
"""A wrapper around a ``MetaPathFinder`` that makes loaders lazy. | """A wrapper around a ``MetaPathFinder`` that makes loaders lazy. | ||||
``sys.meta_path`` finders have their ``find_spec()`` called to locate a | ``sys.meta_path`` finders have their ``find_spec()`` called to locate a | ||||
module. This returns a ``ModuleSpec`` if found or ``None``. The | module. This returns a ``ModuleSpec`` if found or ``None``. The | ||||
``ModuleSpec`` has a ``loader`` attribute, which is called to actually | ``ModuleSpec`` has a ``loader`` attribute, which is called to actually | ||||
load a module. | load a module. | ||||
Our class wraps an existing finder and overloads its ``find_spec()`` to | Our class wraps an existing finder and overloads its ``find_spec()`` to |
b'absorb.description': b'yellow', | b'absorb.description': b'yellow', | ||||
b'absorb.node': b'blue bold', | b'absorb.node': b'blue bold', | ||||
b'absorb.path': b'bold', | b'absorb.path': b'bold', | ||||
} | } | ||||
defaultdict = collections.defaultdict | defaultdict = collections.defaultdict | ||||
class nullui(object): | class nullui: | ||||
"""blank ui object doing nothing""" | """blank ui object doing nothing""" | ||||
debugflag = False | debugflag = False | ||||
verbose = False | verbose = False | ||||
quiet = True | quiet = True | ||||
def __getitem__(name): | def __getitem__(name): | ||||
def nullfunc(*args, **kwds): | def nullfunc(*args, **kwds): | ||||
return | return | ||||
return nullfunc | return nullfunc | ||||
class emptyfilecontext(object): | class emptyfilecontext: | ||||
"""minimal filecontext representing an empty file""" | """minimal filecontext representing an empty file""" | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self._repo = repo | self._repo = repo | ||||
def data(self): | def data(self): | ||||
return b'' | return b'' | ||||
filectxfn=store, | filectxfn=store, | ||||
user=user, | user=user, | ||||
date=date, | date=date, | ||||
branch=None, | branch=None, | ||||
extra=extra, | extra=extra, | ||||
) | ) | ||||
class filefixupstate(object): | class filefixupstate: | ||||
"""state needed to apply fixups to a single file | """state needed to apply fixups to a single file | ||||
internally, it keeps file contents of several revisions and a linelog. | internally, it keeps file contents of several revisions and a linelog. | ||||
the linelog uses odd revision numbers for original contents (fctxs passed | the linelog uses odd revision numbers for original contents (fctxs passed | ||||
to __init__), and even revision numbers for fixups, like: | to __init__), and even revision numbers for fixups, like: | ||||
linelog rev 1: self.fctxs[0] (from an immutable "public" changeset) | linelog rev 1: self.fctxs[0] (from an immutable "public" changeset) | ||||
bidxs[i - b1], | bidxs[i - b1], | ||||
b'+', | b'+', | ||||
trim(blines[i]), | trim(blines[i]), | ||||
b'inserted', | b'inserted', | ||||
b'diff.inserted', | b'diff.inserted', | ||||
) | ) | ||||
class fixupstate(object): | class fixupstate: | ||||
"""state needed to run absorb | """state needed to run absorb | ||||
internally, it keeps paths and filefixupstates. | internally, it keeps paths and filefixupstates. | ||||
a typical use is like filefixupstates: | a typical use is like filefixupstates: | ||||
1. call diffwith, to calculate fixups | 1. call diffwith, to calculate fixups | ||||
2. (optionally), present fixups to the user, or edit fixups | 2. (optionally), present fixups to the user, or edit fixups |
b'ignore', | b'ignore', | ||||
default=lambda: [b'chgserver', b'cmdserver', b'extension'], | default=lambda: [b'chgserver', b'cmdserver', b'extension'], | ||||
) | ) | ||||
configitem(b'blackbox', b'date-format', default=b'') | configitem(b'blackbox', b'date-format', default=b'') | ||||
_lastlogger = loggingutil.proxylogger() | _lastlogger = loggingutil.proxylogger() | ||||
class blackboxlogger(object): | class blackboxlogger: | ||||
def __init__(self, ui, repo): | def __init__(self, ui, repo): | ||||
self._repo = repo | self._repo = repo | ||||
self._trackedevents = set(ui.configlist(b'blackbox', b'track')) | self._trackedevents = set(ui.configlist(b'blackbox', b'track')) | ||||
self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore')) | self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore')) | ||||
self._maxfiles = ui.configint(b'blackbox', b'maxfiles') | self._maxfiles = ui.configint(b'blackbox', b'maxfiles') | ||||
self._maxsize = ui.configbytes(b'blackbox', b'maxsize') | self._maxsize = ui.configbytes(b'blackbox', b'maxsize') | ||||
self._inlog = False | self._inlog = False | ||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', | b'bugzilla', | ||||
b'version', | b'version', | ||||
default=None, | default=None, | ||||
) | ) | ||||
class bzaccess(object): | class bzaccess: | ||||
'''Base class for access to Bugzilla.''' | '''Base class for access to Bugzilla.''' | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
usermap = self.ui.config(b'bugzilla', b'usermap') | usermap = self.ui.config(b'bugzilla', b'usermap') | ||||
if usermap: | if usermap: | ||||
self.ui.readconfig(usermap, sections=[b'usermap']) | self.ui.readconfig(usermap, sections=[b'usermap']) | ||||
if len(ids) != 1: | if len(ids) != 1: | ||||
raise error.Abort(_(b'unknown database schema')) | raise error.Abort(_(b'unknown database schema')) | ||||
return ids[0][0] | return ids[0][0] | ||||
# Bugzilla via XMLRPC interface. | # Bugzilla via XMLRPC interface. | ||||
class cookietransportrequest(object): | class cookietransportrequest: | ||||
"""A Transport request method that retains cookies over its lifetime. | """A Transport request method that retains cookies over its lifetime. | ||||
The regular xmlrpclib transports ignore cookies. Which causes | The regular xmlrpclib transports ignore cookies. Which causes | ||||
a bit of a problem when you need a cookie-based login, as with | a bit of a problem when you need a cookie-based login, as with | ||||
the Bugzilla XMLRPC interface prior to 4.4.3. | the Bugzilla XMLRPC interface prior to 4.4.3. | ||||
So this is a helper for defining a Transport which looks for | So this is a helper for defining a Transport which looks for | ||||
cookies being set in responses and saves them to add to all future | cookies being set in responses and saves them to add to all future | ||||
"""Force sending of Bugzilla notification emails. | """Force sending of Bugzilla notification emails. | ||||
Only required if the access method does not trigger notification | Only required if the access method does not trigger notification | ||||
emails automatically. | emails automatically. | ||||
""" | """ | ||||
pass | pass | ||||
class bugzilla(object): | class bugzilla: | ||||
# supported versions of bugzilla. different versions have | # supported versions of bugzilla. different versions have | ||||
# different schemas. | # different schemas. | ||||
_versions = { | _versions = { | ||||
b'2.16': bzmysql, | b'2.16': bzmysql, | ||||
b'2.18': bzmysql_2_18, | b'2.18': bzmysql_2_18, | ||||
b'3.0': bzmysql_3_0, | b'3.0': bzmysql_3_0, | ||||
b'xmlrpc': bzxmlrpc, | b'xmlrpc': bzxmlrpc, | ||||
b'xmlrpc+email': bzxmlrpcemail, | b'xmlrpc+email': bzxmlrpcemail, |
def _encodeornone(d): | def _encodeornone(d): | ||||
if d is None: | if d is None: | ||||
return | return | ||||
return d.encode('latin1') | return d.encode('latin1') | ||||
class _shlexpy3proxy(object): | class _shlexpy3proxy: | ||||
def __init__(self, l): | def __init__(self, l): | ||||
self._l = l | self._l = l | ||||
def __iter__(self): | def __iter__(self): | ||||
return (_encodeornone(v) for v in self._l) | return (_encodeornone(v) for v in self._l) | ||||
def get_token(self): | def get_token(self): | ||||
return _encodeornone(self._l.get_token()) | return _encodeornone(self._l.get_token()) | ||||
class NoRepo(Exception): | class NoRepo(Exception): | ||||
pass | pass | ||||
SKIPREV = b'SKIP' | SKIPREV = b'SKIP' | ||||
class commit(object): | class commit: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
author, | author, | ||||
date, | date, | ||||
desc, | desc, | ||||
parents, | parents, | ||||
branch=None, | branch=None, | ||||
rev=None, | rev=None, | ||||
self.rev = rev | self.rev = rev | ||||
self.extra = extra or {} | self.extra = extra or {} | ||||
self.sortkey = sortkey | self.sortkey = sortkey | ||||
self.saverev = saverev | self.saverev = saverev | ||||
self.phase = phase | self.phase = phase | ||||
self.ctx = ctx # for hg to hg conversions | self.ctx = ctx # for hg to hg conversions | ||||
class converter_source(object): | class converter_source: | ||||
"""Conversion source interface""" | """Conversion source interface""" | ||||
def __init__(self, ui, repotype, path=None, revs=None): | def __init__(self, ui, repotype, path=None, revs=None): | ||||
"""Initialize conversion source (or raise NoRepo("message") | """Initialize conversion source (or raise NoRepo("message") | ||||
exception if path is not a valid repository)""" | exception if path is not a valid repository)""" | ||||
self.ui = ui | self.ui = ui | ||||
self.path = path | self.path = path | ||||
self.revs = revs | self.revs = revs | ||||
def checkrevformat(self, revstr, mapname=b'splicemap'): | def checkrevformat(self, revstr, mapname=b'splicemap'): | ||||
"""revstr is a string that describes a revision in the given | """revstr is a string that describes a revision in the given | ||||
source control system. Return true if revstr has correct | source control system. Return true if revstr has correct | ||||
format. | format. | ||||
""" | """ | ||||
return True | return True | ||||
class converter_sink(object): | class converter_sink: | ||||
"""Conversion sink (target) interface""" | """Conversion sink (target) interface""" | ||||
def __init__(self, ui, repotype, path): | def __init__(self, ui, repotype, path): | ||||
"""Initialize conversion sink (or raise NoRepo("message") | """Initialize conversion sink (or raise NoRepo("message") | ||||
exception if path is not a valid repository) | exception if path is not a valid repository) | ||||
created is a list of paths to remove if a fatal error occurs | created is a list of paths to remove if a fatal error occurs | ||||
later""" | later""" | ||||
def hascommitforsplicemap(self, rev): | def hascommitforsplicemap(self, rev): | ||||
"""This method is for the special needs for splicemap handling and not | """This method is for the special needs for splicemap handling and not | ||||
for general use. Returns True if the sink contains rev, aborts on some | for general use. Returns True if the sink contains rev, aborts on some | ||||
special cases.""" | special cases.""" | ||||
raise NotImplementedError | raise NotImplementedError | ||||
class commandline(object): | class commandline: | ||||
def __init__(self, ui, command): | def __init__(self, ui, command): | ||||
self.ui = ui | self.ui = ui | ||||
self.command = command | self.command = command | ||||
def prerun(self): | def prerun(self): | ||||
pass | pass | ||||
def postrun(self): | def postrun(self): |
return sink(ui, name, path) | return sink(ui, name, path) | ||||
except NoRepo as inst: | except NoRepo as inst: | ||||
ui.note(_(b"convert: %s\n") % inst) | ui.note(_(b"convert: %s\n") % inst) | ||||
except MissingTool as inst: | except MissingTool as inst: | ||||
raise error.Abort(b'%s\n' % inst) | raise error.Abort(b'%s\n' % inst) | ||||
raise error.Abort(_(b'%s: unknown repository type') % path) | raise error.Abort(_(b'%s: unknown repository type') % path) | ||||
class progresssource(object): | class progresssource: | ||||
def __init__(self, ui, source, filecount): | def __init__(self, ui, source, filecount): | ||||
self.ui = ui | self.ui = ui | ||||
self.source = source | self.source = source | ||||
self.progress = ui.makeprogress( | self.progress = ui.makeprogress( | ||||
_(b'getting files'), unit=_(b'files'), total=filecount | _(b'getting files'), unit=_(b'files'), total=filecount | ||||
) | ) | ||||
def getfile(self, file, rev): | def getfile(self, file, rev): | ||||
self.progress.increment(item=file) | self.progress.increment(item=file) | ||||
return self.source.getfile(file, rev) | return self.source.getfile(file, rev) | ||||
def targetfilebelongstosource(self, targetfilename): | def targetfilebelongstosource(self, targetfilename): | ||||
return self.source.targetfilebelongstosource(targetfilename) | return self.source.targetfilebelongstosource(targetfilename) | ||||
def lookuprev(self, rev): | def lookuprev(self, rev): | ||||
return self.source.lookuprev(rev) | return self.source.lookuprev(rev) | ||||
def close(self): | def close(self): | ||||
self.progress.complete() | self.progress.complete() | ||||
class converter(object): | class converter: | ||||
def __init__(self, ui, source, dest, revmapfile, opts): | def __init__(self, ui, source, dest, revmapfile, opts): | ||||
self.source = source | self.source = source | ||||
self.dest = dest | self.dest = dest | ||||
self.ui = ui | self.ui = ui | ||||
self.opts = opts | self.opts = opts | ||||
self.commitcache = {} | self.commitcache = {} | ||||
self.authors = {} | self.authors = {} |
) | ) | ||||
from mercurial.utils import ( | from mercurial.utils import ( | ||||
dateutil, | dateutil, | ||||
procutil, | procutil, | ||||
stringutil, | stringutil, | ||||
) | ) | ||||
class logentry(object): | class logentry: | ||||
"""Class logentry has the following attributes: | """Class logentry has the following attributes: | ||||
.author - author name as CVS knows it | .author - author name as CVS knows it | ||||
.branch - name of branch this revision is on | .branch - name of branch this revision is on | ||||
.branches - revision tuple of branches starting at this revision | .branches - revision tuple of branches starting at this revision | ||||
.comment - commit message | .comment - commit message | ||||
.commitid - CVS commitid or None | .commitid - CVS commitid or None | ||||
.date - the commit date as a (time, tz) tuple | .date - the commit date as a (time, tz) tuple | ||||
.dead - true if file revision is dead | .dead - true if file revision is dead | ||||
hint=_(b'check convert.cvsps.logencoding configuration'), | hint=_(b'check convert.cvsps.logencoding configuration'), | ||||
) | ) | ||||
hook.hook(ui, None, b"cvslog", True, log=log) | hook.hook(ui, None, b"cvslog", True, log=log) | ||||
return log | return log | ||||
class changeset(object): | class changeset: | ||||
"""Class changeset has the following attributes: | """Class changeset has the following attributes: | ||||
.id - integer identifying this changeset (list index) | .id - integer identifying this changeset (list index) | ||||
.author - author name as CVS knows it | .author - author name as CVS knows it | ||||
.branch - name of branch this changeset is on, or None | .branch - name of branch this changeset is on, or None | ||||
.comment - commit message | .comment - commit message | ||||
.commitid - CVS commitid or None | .commitid - CVS commitid or None | ||||
.date - the commit date as a (time,tz) tuple | .date - the commit date as a (time,tz) tuple | ||||
.entries - list of logentry objects in this changeset | .entries - list of logentry objects in this changeset |
def normalize(path): | def normalize(path): | ||||
"""We use posixpath.normpath to support cross-platform path format. | """We use posixpath.normpath to support cross-platform path format. | ||||
However, it doesn't handle None input. So we wrap it up.""" | However, it doesn't handle None input. So we wrap it up.""" | ||||
if path is None: | if path is None: | ||||
return None | return None | ||||
return posixpath.normpath(path) | return posixpath.normpath(path) | ||||
class filemapper(object): | class filemapper: | ||||
"""Map and filter filenames when importing. | """Map and filter filenames when importing. | ||||
A name can be mapped to itself, a new name, or None (omit from new | A name can be mapped to itself, a new name, or None (omit from new | ||||
repository).""" | repository).""" | ||||
def __init__(self, ui, path=None): | def __init__(self, ui, path=None): | ||||
self.ui = ui | self.ui = ui | ||||
self.include = {} | self.include = {} | ||||
self.exclude = {} | self.exclude = {} |
error, | error, | ||||
pycompat, | pycompat, | ||||
util, | util, | ||||
) | ) | ||||
from . import common | from . import common | ||||
class submodule(object): | class submodule: | ||||
def __init__(self, path, node, url): | def __init__(self, path, node, url): | ||||
self.path = path | self.path = path | ||||
self.node = node | self.node = node | ||||
self.url = url | self.url = url | ||||
def hgsub(self): | def hgsub(self): | ||||
return b"%s = [git]%s" % (self.path, self.url) | return b"%s = [git]%s" % (self.path, self.url) | ||||
from mercurial.utils import ( | from mercurial.utils import ( | ||||
dateutil, | dateutil, | ||||
procutil, | procutil, | ||||
) | ) | ||||
from . import common | from . import common | ||||
class gnuarch_source(common.converter_source, common.commandline): | class gnuarch_source(common.converter_source, common.commandline): | ||||
class gnuarch_rev(object): | class gnuarch_rev: | ||||
def __init__(self, rev): | def __init__(self, rev): | ||||
self.rev = rev | self.rev = rev | ||||
self.summary = b'' | self.summary = b'' | ||||
self.date = None | self.date = None | ||||
self.author = b'' | self.author = b'' | ||||
self.continuationof = None | self.continuationof = None | ||||
self.add_files = [] | self.add_files = [] | ||||
self.mod_files = [] | self.mod_files = [] |
def optrev(number): | def optrev(number): | ||||
optrev = svn.core.svn_opt_revision_t() | optrev = svn.core.svn_opt_revision_t() | ||||
optrev.kind = svn.core.svn_opt_revision_number | optrev.kind = svn.core.svn_opt_revision_number | ||||
optrev.value.number = number | optrev.value.number = number | ||||
return optrev | return optrev | ||||
class changedpath(object): | class changedpath: | ||||
def __init__(self, p): | def __init__(self, p): | ||||
self.copyfrom_path = p.copyfrom_path | self.copyfrom_path = p.copyfrom_path | ||||
self.copyfrom_rev = p.copyfrom_rev | self.copyfrom_rev = p.copyfrom_rev | ||||
self.action = p.action | self.action = p.action | ||||
def get_log_child( | def get_log_child( | ||||
fp, | fp, | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'debugsvnlog could not load Subversion python bindings') | _(b'debugsvnlog could not load Subversion python bindings') | ||||
) | ) | ||||
args = decodeargs(ui.fin.read()) | args = decodeargs(ui.fin.read()) | ||||
get_log_child(ui.fout, *args) | get_log_child(ui.fout, *args) | ||||
class logstream(object): | class logstream: | ||||
"""Interruptible revision log iterator.""" | """Interruptible revision log iterator.""" | ||||
def __init__(self, stdout): | def __init__(self, stdout): | ||||
self._stdout = stdout | self._stdout = stdout | ||||
def __iter__(self): | def __iter__(self): | ||||
while True: | while True: | ||||
try: | try: |
return svn.core.svn_auth_open(providers, pool) | return svn.core.svn_auth_open(providers, pool) | ||||
class NotBranchError(SubversionException): | class NotBranchError(SubversionException): | ||||
pass | pass | ||||
class SvnRaTransport(object): | class SvnRaTransport: | ||||
""" | """ | ||||
Open an ra connection to a Subversion repository. | Open an ra connection to a Subversion repository. | ||||
""" | """ | ||||
def __init__(self, url=b"", ra=None): | def __init__(self, url=b"", ra=None): | ||||
self.pool = Pool() | self.pool = Pool() | ||||
self.svn_url = url | self.svn_url = url | ||||
self.username = b'' | self.username = b'' | ||||
svn.core.SVN_ERR_BAD_URL, | svn.core.SVN_ERR_BAD_URL, | ||||
): | ): | ||||
raise NotBranchError(url) | raise NotBranchError(url) | ||||
raise | raise | ||||
else: | else: | ||||
self.ra = ra | self.ra = ra | ||||
svn.ra.reparent(self.ra, self.svn_url.encode('utf8')) | svn.ra.reparent(self.ra, self.svn_url.encode('utf8')) | ||||
class Reporter(object): | class Reporter: | ||||
def __init__(self, reporter_data): | def __init__(self, reporter_data): | ||||
self._reporter, self._baton = reporter_data | self._reporter, self._baton = reporter_data | ||||
def set_path(self, path, revnum, start_empty, lock_token, pool=None): | def set_path(self, path, revnum, start_empty, lock_token, pool=None): | ||||
svn.ra.reporter2_invoke_set_path( | svn.ra.reporter2_invoke_set_path( | ||||
self._reporter, | self._reporter, | ||||
self._baton, | self._baton, | ||||
path, | path, |
b'to-crlf': tocrlf, | b'to-crlf': tocrlf, | ||||
b'is-binary': isbinary, | b'is-binary': isbinary, | ||||
# The following provide backwards compatibility with win32text | # The following provide backwards compatibility with win32text | ||||
b'cleverencode:': tolf, | b'cleverencode:': tolf, | ||||
b'cleverdecode:': tocrlf, | b'cleverdecode:': tocrlf, | ||||
} | } | ||||
class eolfile(object): | class eolfile: | ||||
def __init__(self, ui, root, data): | def __init__(self, ui, root, data): | ||||
self._decode = { | self._decode = { | ||||
b'LF': b'to-lf', | b'LF': b'to-lf', | ||||
b'CRLF': b'to-crlf', | b'CRLF': b'to-crlf', | ||||
b'BIN': b'is-binary', | b'BIN': b'is-binary', | ||||
} | } | ||||
self._encode = { | self._encode = { | ||||
b'LF': b'to-lf', | b'LF': b'to-lf', |
option = opts.get(b'option') | option = opts.get(b'option') | ||||
if not program: | if not program: | ||||
program = b'diff' | program = b'diff' | ||||
option = option or [b'-Npru'] | option = option or [b'-Npru'] | ||||
cmdline = b' '.join(map(procutil.shellquote, [program] + option)) | cmdline = b' '.join(map(procutil.shellquote, [program] + option)) | ||||
return dodiff(ui, repo, cmdline, pats, opts) | return dodiff(ui, repo, cmdline, pats, opts) | ||||
class savedcmd(object): | class savedcmd: | ||||
"""use external program to diff repository (or selected files) | """use external program to diff repository (or selected files) | ||||
Show differences between revisions for the specified files, using | Show differences between revisions for the specified files, using | ||||
the following program:: | the following program:: | ||||
%(path)s | %(path)s | ||||
When two revision arguments are given, then changes are shown | When two revision arguments are given, then changes are shown |
sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults) | sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults) | ||||
) | ) | ||||
return hex(hashutil.sha1(diffoptstr).digest())[:6] | return hex(hashutil.sha1(diffoptstr).digest())[:6] | ||||
_defaultdiffopthash = hashdiffopts(mdiff.defaultopts) | _defaultdiffopthash = hashdiffopts(mdiff.defaultopts) | ||||
class annotateopts(object): | class annotateopts: | ||||
"""like mercurial.mdiff.diffopts, but is for annotate | """like mercurial.mdiff.diffopts, but is for annotate | ||||
followrename: follow renames, like "hg annotate -f" | followrename: follow renames, like "hg annotate -f" | ||||
followmerge: follow p2 of a merge changeset, otherwise p2 is ignored | followmerge: follow p2 of a merge changeset, otherwise p2 is ignored | ||||
""" | """ | ||||
defaults = { | defaults = { | ||||
b'diffopts': None, | b'diffopts': None, | ||||
if diffopthash != _defaultdiffopthash: | if diffopthash != _defaultdiffopthash: | ||||
result += b'i' + diffopthash | result += b'i' + diffopthash | ||||
return result or b'default' | return result or b'default' | ||||
defaultopts = annotateopts() | defaultopts = annotateopts() | ||||
class _annotatecontext(object): | class _annotatecontext: | ||||
"""do not use this class directly as it does not use lock to protect | """do not use this class directly as it does not use lock to protect | ||||
writes. use "with annotatecontext(...)" instead. | writes. use "with annotatecontext(...)" instead. | ||||
""" | """ | ||||
def __init__(self, repo, path, linelogpath, revmappath, opts): | def __init__(self, repo, path, linelogpath, revmappath, opts): | ||||
self.repo = repo | self.repo = repo | ||||
self.ui = repo.ui | self.ui = repo.ui | ||||
self.path = path | self.path = path | ||||
"""silent, best-effort unlink""" | """silent, best-effort unlink""" | ||||
for path in paths: | for path in paths: | ||||
try: | try: | ||||
util.unlink(path) | util.unlink(path) | ||||
except OSError: | except OSError: | ||||
pass | pass | ||||
class pathhelper(object): | class pathhelper: | ||||
"""helper for getting paths for lockfile, linelog and revmap""" | """helper for getting paths for lockfile, linelog and revmap""" | ||||
def __init__(self, repo, path, opts=defaultopts): | def __init__(self, repo, path, opts=defaultopts): | ||||
# different options use different directories | # different options use different directories | ||||
self._vfspath = os.path.join( | self._vfspath = os.path.join( | ||||
b'fastannotate', opts.shortstr, encodedir(path) | b'fastannotate', opts.shortstr, encodedir(path) | ||||
) | ) | ||||
self._repo = repo | self._repo = repo |
pycompat, | pycompat, | ||||
templatefilters, | templatefilters, | ||||
util, | util, | ||||
) | ) | ||||
from mercurial.utils import dateutil | from mercurial.utils import dateutil | ||||
# imitating mercurial.commands.annotate, not using the vanilla formatter since | # imitating mercurial.commands.annotate, not using the vanilla formatter since | ||||
# the data structures are a bit different, and we have some fast paths. | # the data structures are a bit different, and we have some fast paths. | ||||
class defaultformatter(object): | class defaultformatter: | ||||
"""the default formatter that does leftpad and support some common flags""" | """the default formatter that does leftpad and support some common flags""" | ||||
def __init__(self, ui, repo, opts): | def __init__(self, ui, repo, opts): | ||||
self.ui = ui | self.ui = ui | ||||
self.opts = opts | self.opts = opts | ||||
if ui.quiet: | if ui.quiet: | ||||
datefunc = dateutil.shortdate | datefunc = dateutil.shortdate |
# whether the changeset changes the file path (ie. is a rename) | # whether the changeset changes the file path (ie. is a rename) | ||||
renameflag = 2 | renameflag = 2 | ||||
# len(mercurial.node.nullid) | # len(mercurial.node.nullid) | ||||
_hshlen = 20 | _hshlen = 20 | ||||
class revmap(object): | class revmap: | ||||
"""trivial hg bin hash - linelog rev bidirectional map | """trivial hg bin hash - linelog rev bidirectional map | ||||
also stores a flag (uint8) for each revision, and track renames. | also stores a flag (uint8) for each revision, and track renames. | ||||
""" | """ | ||||
HEADER = b'REVMAP1\0' | HEADER = b'REVMAP1\0' | ||||
def __init__(self, path=None): | def __init__(self, path=None): |
) | ) | ||||
from . import ( | from . import ( | ||||
context, | context, | ||||
revmap, | revmap, | ||||
) | ) | ||||
class _lazyfctx(object): | class _lazyfctx: | ||||
"""delegates to fctx but do not construct fctx when unnecessary""" | """delegates to fctx but do not construct fctx when unnecessary""" | ||||
def __init__(self, repo, node, path): | def __init__(self, repo, node, path): | ||||
self._node = node | self._node = node | ||||
self._path = path | self._path = path | ||||
self._repo = repo | self._repo = repo | ||||
def node(self): | def node(self): |
"""Returns the names of [fix] config options that have suboptions""" | """Returns the names of [fix] config options that have suboptions""" | ||||
names = set() | names = set() | ||||
for k, v in ui.configitems(b'fix'): | for k, v in ui.configitems(b'fix'): | ||||
if b':' in k: | if b':' in k: | ||||
names.add(k.split(b':', 1)[0]) | names.add(k.split(b':', 1)[0]) | ||||
return names | return names | ||||
class Fixer(object): | class Fixer: | ||||
"""Wraps the raw config values for a fixer with methods""" | """Wraps the raw config values for a fixer with methods""" | ||||
def __init__( | def __init__( | ||||
self, command, pattern, linerange, priority, metadata, skipclean | self, command, pattern, linerange, priority, metadata, skipclean | ||||
): | ): | ||||
self._command = command | self._command = command | ||||
self._pattern = pattern | self._pattern = pattern | ||||
self._linerange = linerange | self._linerange = linerange |
) | ) | ||||
modified, added, removed, deleted, unknown, ignored, clean = rv2 | modified, added, removed, deleted, unknown, ignored, clean = rv2 | ||||
return scmutil.status( | return scmutil.status( | ||||
modified, added, removed, deleted, unknown, ignored, clean | modified, added, removed, deleted, unknown, ignored, clean | ||||
) | ) | ||||
class poststatus(object): | class poststatus: | ||||
def __init__(self, startclock): | def __init__(self, startclock): | ||||
self._startclock = pycompat.sysbytes(startclock) | self._startclock = pycompat.sysbytes(startclock) | ||||
def __call__(self, wctx, status): | def __call__(self, wctx, status): | ||||
clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock | clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock | ||||
hashignore = _hashignore(wctx.repo().dirstate._ignore) | hashignore = _hashignore(wctx.repo().dirstate._ignore) | ||||
notefiles = ( | notefiles = ( | ||||
status.modified | status.modified | ||||
return orig(source, link_name) | return orig(source, link_name) | ||||
finally: | finally: | ||||
try: | try: | ||||
os.utime(os.path.dirname(link_name), None) | os.utime(os.path.dirname(link_name), None) | ||||
except OSError: | except OSError: | ||||
pass | pass | ||||
class state_update(object): | class state_update: | ||||
"""This context manager is responsible for dispatching the state-enter | """This context manager is responsible for dispatching the state-enter | ||||
and state-leave signals to the watchman service. The enter and leave | and state-leave signals to the watchman service. The enter and leave | ||||
methods can be invoked manually (for scenarios where context manager | methods can be invoked manually (for scenarios where context manager | ||||
semantics are not possible). If parameters oldnode and newnode are None, | semantics are not possible). If parameters oldnode and newnode are None, | ||||
they will be populated based on current working copy in enter and | they will be populated based on current working copy in enter and | ||||
leave, respectively. Similarly, if the distance is none, it will be | leave, respectively. Similarly, if the distance is none, it will be | ||||
calculated based on the oldnode and newnode in the leave method.""" | calculated based on the oldnode and newnode in the leave method.""" | ||||
""" | """ | ||||
def __init__(self, msg, cmd=None): | def __init__(self, msg, cmd=None): | ||||
super(CommandError, self).__init__( | super(CommandError, self).__init__( | ||||
"watchman command error: %s" % (msg,), cmd | "watchman command error: %s" % (msg,), cmd | ||||
) | ) | ||||
class Transport(object): | class Transport: | ||||
"""communication transport to the watchman server""" | """communication transport to the watchman server""" | ||||
buf = None | buf = None | ||||
def close(self): | def close(self): | ||||
"""tear it down""" | """tear it down""" | ||||
raise NotImplementedError() | raise NotImplementedError() | ||||
if b"\n" in b: | if b"\n" in b: | ||||
result = b"".join(self.buf) | result = b"".join(self.buf) | ||||
(line, b) = b.split(b"\n", 1) | (line, b) = b.split(b"\n", 1) | ||||
self.buf = [b] | self.buf = [b] | ||||
return result + line | return result + line | ||||
self.buf.append(b) | self.buf.append(b) | ||||
class Codec(object): | class Codec: | ||||
"""communication encoding for the watchman server""" | """communication encoding for the watchman server""" | ||||
transport = None | transport = None | ||||
def __init__(self, transport): | def __init__(self, transport): | ||||
self.transport = transport | self.transport = transport | ||||
def receive(self): | def receive(self): | ||||
# In Python 3, json.dumps is a transformation from objects possibly | # In Python 3, json.dumps is a transformation from objects possibly | ||||
# containing Unicode strings to Unicode string. Even with (the default) | # containing Unicode strings to Unicode string. Even with (the default) | ||||
# ensure_ascii=True, dumps returns a Unicode string. | # ensure_ascii=True, dumps returns a Unicode string. | ||||
if compat.PYTHON3: | if compat.PYTHON3: | ||||
cmd = cmd.encode("ascii") | cmd = cmd.encode("ascii") | ||||
self.transport.write(cmd + b"\n") | self.transport.write(cmd + b"\n") | ||||
class client(object): | class client: | ||||
"""Handles the communication with the watchman service""" | """Handles the communication with the watchman service""" | ||||
sockpath = None | sockpath = None | ||||
transport = None | transport = None | ||||
sendCodec = None | sendCodec = None | ||||
recvCodec = None | recvCodec = None | ||||
sendConn = None | sendConn = None | ||||
recvConn = None | recvConn = None |
def _buf_pos(buf, pos): | def _buf_pos(buf, pos): | ||||
ret = buf[pos] | ret = buf[pos] | ||||
# Normalize the return type to bytes | # Normalize the return type to bytes | ||||
if compat.PYTHON3 and not isinstance(ret, bytes): | if compat.PYTHON3 and not isinstance(ret, bytes): | ||||
ret = bytes((ret,)) | ret = bytes((ret,)) | ||||
return ret | return ret | ||||
class _bser_buffer(object): | class _bser_buffer: | ||||
def __init__(self, version): | def __init__(self, version): | ||||
self.bser_version = version | self.bser_version = version | ||||
self.buf = ctypes.create_string_buffer(8192) | self.buf = ctypes.create_string_buffer(8192) | ||||
if self.bser_version == 1: | if self.bser_version == 1: | ||||
struct.pack_into( | struct.pack_into( | ||||
tobytes(len(EMPTY_HEADER)) + b"s", self.buf, 0, EMPTY_HEADER | tobytes(len(EMPTY_HEADER)) + b"s", self.buf, 0, EMPTY_HEADER | ||||
) | ) | ||||
self.wpos = len(EMPTY_HEADER) | self.wpos = len(EMPTY_HEADER) | ||||
struct.pack_into(b"=i", bser_buf.buf, 2, capabilities) | struct.pack_into(b"=i", bser_buf.buf, 2, capabilities) | ||||
struct.pack_into(b"=i", bser_buf.buf, 7, obj_len) | struct.pack_into(b"=i", bser_buf.buf, 7, obj_len) | ||||
return bser_buf.buf.raw[: bser_buf.wpos] | return bser_buf.buf.raw[: bser_buf.wpos] | ||||
# This is a quack-alike with the bserObjectType in bser.c | # This is a quack-alike with the bserObjectType in bser.c | ||||
# It provides by getattr accessors and getitem for both index | # It provides by getattr accessors and getitem for both index | ||||
# and name. | # and name. | ||||
class _BunserDict(object): | class _BunserDict: | ||||
__slots__ = ("_keys", "_values") | __slots__ = ("_keys", "_values") | ||||
def __init__(self, keys, values): | def __init__(self, keys, values): | ||||
self._keys = keys | self._keys = keys | ||||
self._values = values | self._values = values | ||||
def __getattr__(self, name): | def __getattr__(self, name): | ||||
return self.__getitem__(name) | return self.__getitem__(name) | ||||
return self._values[self._keys.index(key)] | return self._values[self._keys.index(key)] | ||||
except ValueError: | except ValueError: | ||||
raise KeyError("_BunserDict has no key %s" % key) | raise KeyError("_BunserDict has no key %s" % key) | ||||
def __len__(self): | def __len__(self): | ||||
return len(self._keys) | return len(self._keys) | ||||
class Bunser(object): | class Bunser: | ||||
def __init__(self, mutable=True, value_encoding=None, value_errors=None): | def __init__(self, mutable=True, value_encoding=None, value_errors=None): | ||||
self.mutable = mutable | self.mutable = mutable | ||||
self.value_encoding = value_encoding | self.value_encoding = value_encoding | ||||
if value_encoding is None: | if value_encoding is None: | ||||
self.value_errors = None | self.value_errors = None | ||||
elif value_errors is None: | elif value_errors is None: | ||||
self.value_errors = "strict" | self.value_errors = "strict" |
pathutil, | pathutil, | ||||
util, | util, | ||||
) | ) | ||||
_version = 4 | _version = 4 | ||||
_versionformat = b">I" | _versionformat = b">I" | ||||
class state(object): | class state: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self._vfs = repo.vfs | self._vfs = repo.vfs | ||||
self._ui = repo.ui | self._ui = repo.ui | ||||
self._rootdir = pathutil.normasprefix(repo.root) | self._rootdir = pathutil.normasprefix(repo.root) | ||||
self._lastclock = None | self._lastclock = None | ||||
self._identity = util.filestat(None) | self._identity = util.filestat(None) | ||||
self.mode = self._ui.config(b'fsmonitor', b'mode') | self.mode = self._ui.config(b'fsmonitor', b'mode') |
class WatchmanNoRoot(Unavailable): | class WatchmanNoRoot(Unavailable): | ||||
def __init__(self, root, msg): | def __init__(self, root, msg): | ||||
self.root = root | self.root = root | ||||
super(WatchmanNoRoot, self).__init__(msg) | super(WatchmanNoRoot, self).__init__(msg) | ||||
class client(object): | class client: | ||||
def __init__(self, ui, root, timeout=1.0): | def __init__(self, ui, root, timeout=1.0): | ||||
err = None | err = None | ||||
if not self._user: | if not self._user: | ||||
err = b"couldn't get user" | err = b"couldn't get user" | ||||
warn = True | warn = True | ||||
if self._user in ui.configlist(b'fsmonitor', b'blacklistusers'): | if self._user in ui.configlist(b'fsmonitor', b'blacklistusers'): | ||||
err = b'user %s in blacklist' % self._user | err = b'user %s in blacklist' % self._user | ||||
warn = False | warn = False |
b"log-index-cache-miss", | b"log-index-cache-miss", | ||||
default=False, | default=False, | ||||
) | ) | ||||
getversion = gitutil.pygit2_version | getversion = gitutil.pygit2_version | ||||
# TODO: extract an interface for this in core | # TODO: extract an interface for this in core | ||||
class gitstore(object): # store.basicstore): | class gitstore: # store.basicstore): | ||||
def __init__(self, path, vfstype): | def __init__(self, path, vfstype): | ||||
self.vfs = vfstype(path) | self.vfs = vfstype(path) | ||||
self.opener = self.vfs | self.opener = self.vfs | ||||
self.path = self.vfs.base | self.path = self.vfs.base | ||||
self.createmode = store._calcmode(self.vfs) | self.createmode = store._calcmode(self.vfs) | ||||
# above lines should go away in favor of: | # above lines should go away in favor of: | ||||
# super(gitstore, self).__init__(path, vfstype) | # super(gitstore, self).__init__(path, vfstype) | ||||
b'pygit2 library to be installed' | b'pygit2 library to be installed' | ||||
) | ) | ||||
) | ) | ||||
return gitstore(storebasepath, vfstype) | return gitstore(storebasepath, vfstype) | ||||
return orig(requirements, storebasepath, vfstype) | return orig(requirements, storebasepath, vfstype) | ||||
class gitfilestorage(object): | class gitfilestorage: | ||||
def file(self, path): | def file(self, path): | ||||
if path[0:1] == b'/': | if path[0:1] == b'/': | ||||
path = path[1:] | path = path[1:] | ||||
return gitlog.filelog(self.store.git, self.store._db, path) | return gitlog.filelog(self.store.git, self.store._db, path) | ||||
def _makefilestorage(orig, requirements, features, **kwargs): | def _makefilestorage(orig, requirements, features, **kwargs): | ||||
store = kwargs['store'] | store = kwargs['store'] | ||||
exclude.write(b'\n.hg\n') | exclude.write(b'\n.hg\n') | ||||
with open(os.path.join(dothg, b'requires'), 'wb') as f: | with open(os.path.join(dothg, b'requires'), 'wb') as f: | ||||
f.write(b'git\n') | f.write(b'git\n') | ||||
_BMS_PREFIX = 'refs/heads/' | _BMS_PREFIX = 'refs/heads/' | ||||
class gitbmstore(object): | class gitbmstore: | ||||
def __init__(self, gitrepo): | def __init__(self, gitrepo): | ||||
self.gitrepo = gitrepo | self.gitrepo = gitrepo | ||||
self._aclean = True | self._aclean = True | ||||
self._active = gitrepo.references['HEAD'] # git head, not mark | self._active = gitrepo.references['HEAD'] # git head, not mark | ||||
def __contains__(self, name): | def __contains__(self, name): | ||||
return ( | return ( | ||||
_BMS_PREFIX + pycompat.fsdecode(name) | _BMS_PREFIX + pycompat.fsdecode(name) |
pygit2.GIT_STATUS_WT_RENAMED: b'a', | pygit2.GIT_STATUS_WT_RENAMED: b'a', | ||||
pygit2.GIT_STATUS_WT_TYPECHANGE: b'n', | pygit2.GIT_STATUS_WT_TYPECHANGE: b'n', | ||||
pygit2.GIT_STATUS_WT_UNREADABLE: b'?', | pygit2.GIT_STATUS_WT_UNREADABLE: b'?', | ||||
pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm', | pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm', | ||||
} | } | ||||
@interfaceutil.implementer(intdirstate.idirstate) | @interfaceutil.implementer(intdirstate.idirstate) | ||||
class gitdirstate(object): | class gitdirstate: | ||||
def __init__(self, ui, root, gitrepo): | def __init__(self, ui, root, gitrepo): | ||||
self._ui = ui | self._ui = ui | ||||
self._root = os.path.dirname(root) | self._root = os.path.dirname(root) | ||||
self.git = gitrepo | self.git = gitrepo | ||||
self._plchangecallbacks = {} | self._plchangecallbacks = {} | ||||
# TODO: context.poststatusfixup is bad and uses this attribute | # TODO: context.poststatusfixup is bad and uses this attribute | ||||
self._dirty = False | self._dirty = False | ||||
gitutil, | gitutil, | ||||
index, | index, | ||||
manifest as gitmanifest, | manifest as gitmanifest, | ||||
) | ) | ||||
pygit2 = gitutil.get_pygit2() | pygit2 = gitutil.get_pygit2() | ||||
class baselog(object): # revlog.revlog): | class baselog: # revlog.revlog): | ||||
"""Common implementations between changelog and manifestlog.""" | """Common implementations between changelog and manifestlog.""" | ||||
def __init__(self, gr, db): | def __init__(self, gr, db): | ||||
self.gitrepo = gr | self.gitrepo = gr | ||||
self._db = db | self._db = db | ||||
def __len__(self): | def __len__(self): | ||||
return int( | return int( | ||||
def hasnode(self, n): | def hasnode(self, n): | ||||
t = self._db.execute( | t = self._db.execute( | ||||
'SELECT node FROM changelog WHERE node = ?', | 'SELECT node FROM changelog WHERE node = ?', | ||||
(pycompat.sysstr(n),), | (pycompat.sysstr(n),), | ||||
).fetchone() | ).fetchone() | ||||
return t is not None | return t is not None | ||||
class baselogindex(object): | class baselogindex: | ||||
def __init__(self, log): | def __init__(self, log): | ||||
self._log = log | self._log = log | ||||
def has_node(self, n): | def has_node(self, n): | ||||
return self._log.rev(n) != -1 | return self._log.rev(n) != -1 | ||||
def __len__(self): | def __len__(self): | ||||
return len(self._log) | return len(self._log) |
) | ) | ||||
from . import gitutil | from . import gitutil | ||||
pygit2 = gitutil.get_pygit2() | pygit2 = gitutil.get_pygit2() | ||||
@interfaceutil.implementer(repository.imanifestdict) | @interfaceutil.implementer(repository.imanifestdict) | ||||
class gittreemanifest(object): | class gittreemanifest: | ||||
"""Expose git trees (and optionally a builder's overlay) as a manifestdict. | """Expose git trees (and optionally a builder's overlay) as a manifestdict. | ||||
Very similar to mercurial.manifest.treemanifest. | Very similar to mercurial.manifest.treemanifest. | ||||
""" | """ | ||||
def __init__(self, git_repo, root_tree, pending_changes): | def __init__(self, git_repo, root_tree, pending_changes): | ||||
"""Initializer. | """Initializer. | ||||
# being clever about walking over the sets... | # being clever about walking over the sets... | ||||
baseline = set(self._walkonetree(self._tree, match, b'')) | baseline = set(self._walkonetree(self._tree, match, b'')) | ||||
deleted = {p for p, v in self._pending_changes.items() if v is None} | deleted = {p for p, v in self._pending_changes.items() if v is None} | ||||
pend = {p for p in self._pending_changes if match(p)} | pend = {p for p in self._pending_changes if match(p)} | ||||
return iter(sorted((baseline | pend) - deleted)) | return iter(sorted((baseline | pend) - deleted)) | ||||
@interfaceutil.implementer(repository.imanifestrevisionstored) | @interfaceutil.implementer(repository.imanifestrevisionstored) | ||||
class gittreemanifestctx(object): | class gittreemanifestctx: | ||||
def __init__(self, repo, gittree): | def __init__(self, repo, gittree): | ||||
self._repo = repo | self._repo = repo | ||||
self._tree = gittree | self._tree = gittree | ||||
def read(self): | def read(self): | ||||
return gittreemanifest(self._repo, self._tree, None) | return gittreemanifest(self._repo, self._tree, None) | ||||
def readfast(self, shallow=False): | def readfast(self, shallow=False): | ||||
return self.read() | return self.read() | ||||
def copy(self): | def copy(self): | ||||
# NB: it's important that we return a memgittreemanifestctx | # NB: it's important that we return a memgittreemanifestctx | ||||
# because the caller expects a mutable manifest. | # because the caller expects a mutable manifest. | ||||
return memgittreemanifestctx(self._repo, self._tree) | return memgittreemanifestctx(self._repo, self._tree) | ||||
def find(self, path): | def find(self, path): | ||||
return self.read()[path] | return self.read()[path] | ||||
@interfaceutil.implementer(repository.imanifestrevisionwritable) | @interfaceutil.implementer(repository.imanifestrevisionwritable) | ||||
class memgittreemanifestctx(object): | class memgittreemanifestctx: | ||||
def __init__(self, repo, tree): | def __init__(self, repo, tree): | ||||
self._repo = repo | self._repo = repo | ||||
self._tree = tree | self._tree = tree | ||||
# dict of path: Optional[Tuple(node, flags)] | # dict of path: Optional[Tuple(node, flags)] | ||||
self._pending_changes = {} | self._pending_changes = {} | ||||
def read(self): | def read(self): | ||||
return gittreemanifest(self._repo, self._tree, self._pending_changes) | return gittreemanifest(self._repo, self._tree, self._pending_changes) |
(k, convert(v)) if isinstance(v, bytes) else (k, v) | (k, convert(v)) if isinstance(v, bytes) else (k, v) | ||||
for k, v in opts.items() | for k, v in opts.items() | ||||
] | ] | ||||
) | ) | ||||
return args, opts | return args, opts | ||||
class Command(object): | class Command: | ||||
def __init__(self, name): | def __init__(self, name): | ||||
self.name = name | self.name = name | ||||
self.args = [] | self.args = [] | ||||
self.opts = {} | self.opts = {} | ||||
def __bytes__(self): | def __bytes__(self): | ||||
cmd = b"hg " + self.name | cmd = b"hg " + self.name | ||||
if self.opts: | if self.opts: | ||||
def __setitem__(self, key, value): | def __setitem__(self, key, value): | ||||
values = self.opts.setdefault(key, []) | values = self.opts.setdefault(key, []) | ||||
values.append(value) | values.append(value) | ||||
def __and__(self, other): | def __and__(self, other): | ||||
return AndCommand(self, other) | return AndCommand(self, other) | ||||
class AndCommand(object): | class AndCommand: | ||||
def __init__(self, left, right): | def __init__(self, left, right): | ||||
self.left = left | self.left = left | ||||
self.right = right | self.right = right | ||||
def __str__(self): | def __str__(self): | ||||
return b"%s && %s" % (self.left, self.right) | return b"%s && %s" % (self.left, self.right) | ||||
def __and__(self, other): | def __and__(self, other): |
# Custom help category | # Custom help category | ||||
_HELP_CATEGORY = b'gpg' | _HELP_CATEGORY = b'gpg' | ||||
help.CATEGORY_ORDER.insert( | help.CATEGORY_ORDER.insert( | ||||
help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY | help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY | ||||
) | ) | ||||
help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)' | help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)' | ||||
class gpg(object): | class gpg: | ||||
def __init__(self, path, key=None): | def __init__(self, path, key=None): | ||||
self.path = path | self.path = path | ||||
self.key = (key and b" --local-user \"%s\"" % key) or b"" | self.key = (key and b" --local-user \"%s\"" % key) or b"" | ||||
def sign(self, data): | def sign(self, data): | ||||
gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key) | gpgcmd = b"%s --sign --detach-sign%s" % (self.path, self.key) | ||||
return procutil.filter(data, gpgcmd) | return procutil.filter(data, gpgcmd) | ||||
b"will DISCARD it from the edited history!" | b"will DISCARD it from the edited history!" | ||||
) | ) | ||||
lines = (intro % (first, last)).split(b'\n') + actions + hints | lines = (intro % (first, last)).split(b'\n') + actions + hints | ||||
return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines]) | return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines]) | ||||
class histeditstate(object): | class histeditstate: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self.repo = repo | self.repo = repo | ||||
self.actions = None | self.actions = None | ||||
self.keep = None | self.keep = None | ||||
self.topmost = None | self.topmost = None | ||||
self.parentctxnode = None | self.parentctxnode = None | ||||
self.lock = None | self.lock = None | ||||
self.wlock = None | self.wlock = None | ||||
def clear(self): | def clear(self): | ||||
if self.inprogress(): | if self.inprogress(): | ||||
self.repo.vfs.unlink(b'histedit-state') | self.repo.vfs.unlink(b'histedit-state') | ||||
def inprogress(self): | def inprogress(self): | ||||
return self.repo.vfs.exists(b'histedit-state') | return self.repo.vfs.exists(b'histedit-state') | ||||
class histeditaction(object): | class histeditaction: | ||||
def __init__(self, state, node): | def __init__(self, state, node): | ||||
self.state = state | self.state = state | ||||
self.repo = state.repo | self.repo = state.repo | ||||
self.node = node | self.node = node | ||||
@classmethod | @classmethod | ||||
def fromrule(cls, state, rule): | def fromrule(cls, state, rule): | ||||
"""Parses the given rule, returning an instance of the histeditaction.""" | """Parses the given rule, returning an instance of the histeditaction.""" | ||||
MODE_HELP: {}, | MODE_HELP: {}, | ||||
} | } | ||||
def screen_size(): | def screen_size(): | ||||
return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' ')) | return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' ')) | ||||
class histeditrule(object): | class histeditrule: | ||||
def __init__(self, ui, ctx, pos, action=b'pick'): | def __init__(self, ui, ctx, pos, action=b'pick'): | ||||
self.ui = ui | self.ui = ui | ||||
self.ctx = ctx | self.ctx = ctx | ||||
self.action = action | self.action = action | ||||
self.origpos = pos | self.origpos = pos | ||||
self.pos = pos | self.pos = pos | ||||
self.conflicts = [] | self.conflicts = [] | ||||
def _trunc_tail(line, n): | def _trunc_tail(line, n): | ||||
if len(line) <= n: | if len(line) <= n: | ||||
return line | return line | ||||
return line[: n - 2] + b' >' | return line[: n - 2] + b' >' | ||||
class _chistedit_state(object): | class _chistedit_state: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
repo, | repo, | ||||
rules, | rules, | ||||
stdscr, | stdscr, | ||||
): | ): | ||||
self.repo = repo | self.repo = repo | ||||
self.rules = rules | self.rules = rules |
if common.isremotebooksenabled(ui): | if common.isremotebooksenabled(ui): | ||||
hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/' | hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/' | ||||
if remotebookmark.startswith(hoist): | if remotebookmark.startswith(hoist): | ||||
return remotebookmark[len(hoist) :] | return remotebookmark[len(hoist) :] | ||||
return remotebookmark | return remotebookmark | ||||
class bundlestore(object): | class bundlestore: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self._repo = repo | self._repo = repo | ||||
storetype = self._repo.ui.config(b'infinitepush', b'storetype') | storetype = self._repo.ui.config(b'infinitepush', b'storetype') | ||||
if storetype == b'disk': | if storetype == b'disk': | ||||
from . import store | from . import store | ||||
self.store = store.filebundlestore(self._repo.ui, self._repo) | self.store = store.filebundlestore(self._repo.ui, self._repo) | ||||
elif storetype == b'external': | elif storetype == b'external': |
try: | try: | ||||
lfsmod = extensions.find(b'lfs') | lfsmod = extensions.find(b'lfs') | ||||
lfsmod.wrapper.uploadblobsfromrevs(repo, missing) | lfsmod.wrapper.uploadblobsfromrevs(repo, missing) | ||||
except KeyError: | except KeyError: | ||||
# Ignore if lfs extension is not enabled | # Ignore if lfs extension is not enabled | ||||
return | return | ||||
class copiedpart(object): | class copiedpart: | ||||
"""a copy of unbundlepart content that can be consumed later""" | """a copy of unbundlepart content that can be consumed later""" | ||||
def __init__(self, part): | def __init__(self, part): | ||||
# copy "public properties" | # copy "public properties" | ||||
self.type = part.type | self.type = part.type | ||||
self.id = part.id | self.id = part.id | ||||
self.mandatory = part.mandatory | self.mandatory = part.mandatory | ||||
self.mandatoryparams = part.mandatoryparams | self.mandatoryparams = part.mandatoryparams |
# Infinite push | # Infinite push | ||||
# | # | ||||
# Copyright 2016 Facebook, Inc. | # Copyright 2016 Facebook, Inc. | ||||
# | # | ||||
# This software may be used and distributed according to the terms of the | # This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | # GNU General Public License version 2 or any later version. | ||||
class indexapi(object): | class indexapi: | ||||
"""Class that manages access to infinitepush index. | """Class that manages access to infinitepush index. | ||||
This class is a context manager and all write operations (like | This class is a context manager and all write operations (like | ||||
deletebookmarks, addbookmark etc) should use `with` statement: | deletebookmarks, addbookmark etc) should use `with` statement: | ||||
with index: | with index: | ||||
index.deletebookmarks(...) | index.deletebookmarks(...) | ||||
... | ... |
class BundleWriteException(Exception): | class BundleWriteException(Exception): | ||||
pass | pass | ||||
class BundleReadException(Exception): | class BundleReadException(Exception): | ||||
pass | pass | ||||
class abstractbundlestore(object): # pytype: disable=ignored-metaclass | class abstractbundlestore: # pytype: disable=ignored-metaclass | ||||
"""Defines the interface for bundle stores. | """Defines the interface for bundle stores. | ||||
A bundle store is an entity that stores raw bundle data. It is a simple | A bundle store is an entity that stores raw bundle data. It is a simple | ||||
key-value store. However, the keys are chosen by the store. The keys can | key-value store. However, the keys are chosen by the store. The keys can | ||||
be any Python object understood by the corresponding bundle index (see | be any Python object understood by the corresponding bundle index (see | ||||
``abstractbundleindex`` below). | ``abstractbundleindex`` below). | ||||
""" | """ | ||||
Returns None if the bundle isn't known. | Returns None if the bundle isn't known. | ||||
Throws BundleReadException | Throws BundleReadException | ||||
The returned object should be a file object supporting read() | The returned object should be a file object supporting read() | ||||
and close(). | and close(). | ||||
""" | """ | ||||
class filebundlestore(object): | class filebundlestore: | ||||
"""bundle store in filesystem | """bundle store in filesystem | ||||
meant for storing bundles somewhere on disk and on network filesystems | meant for storing bundles somewhere on disk and on network filesystems | ||||
""" | """ | ||||
def __init__(self, ui, repo): | def __init__(self, ui, repo): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo |
oldhashes, | oldhashes, | ||||
newhashes, | newhashes, | ||||
) | ) | ||||
) | ) | ||||
__str__ = encoding.strmethod(__bytes__) | __str__ = encoding.strmethod(__bytes__) | ||||
class journalstorage(object): | class journalstorage: | ||||
"""Storage for journal entries | """Storage for journal entries | ||||
Entries are divided over two files; one with entries that pertain to the | Entries are divided over two files; one with entries that pertain to the | ||||
local working copy *only*, and one with entries that are shared across | local working copy *only*, and one with entries that are shared across | ||||
multiple working copies when shared using the share extension. | multiple working copies when shared using the share extension. | ||||
Entries are stored with NUL bytes as separators. See the journalentry | Entries are stored with NUL bytes as separators. See the journalentry | ||||
class for the per-entry structure. | class for the per-entry structure. |
"""Retrieves modified and added files from a working directory state | """Retrieves modified and added files from a working directory state | ||||
and returns the subset of each contained in given changed files | and returns the subset of each contained in given changed files | ||||
retrieved from a change context.""" | retrieved from a change context.""" | ||||
modified = [f for f in wstatus.modified if f in changed] | modified = [f for f in wstatus.modified if f in changed] | ||||
added = [f for f in wstatus.added if f in changed] | added = [f for f in wstatus.added if f in changed] | ||||
return modified, added | return modified, added | ||||
class kwtemplater(object): | class kwtemplater: | ||||
""" | """ | ||||
Sets up keyword templates, corresponding keyword regex, and | Sets up keyword templates, corresponding keyword regex, and | ||||
provides keyword substitution functions. | provides keyword substitution functions. | ||||
""" | """ | ||||
def __init__(self, ui, repo, inc, exc): | def __init__(self, ui, repo, inc, exc): | ||||
self.ui = ui | self.ui = ui | ||||
self._repo = weakref.ref(repo) | self._repo = weakref.ref(repo) |
self.filename, | self.filename, | ||||
self.detail, | self.detail, | ||||
) | ) | ||||
def __str__(self): | def __str__(self): | ||||
return b"%s: %s" % (urlutil.hidepassword(self.url), self.detail) | return b"%s: %s" % (urlutil.hidepassword(self.url), self.detail) | ||||
class basestore(object): | class basestore: | ||||
def __init__(self, ui, repo, url): | def __init__(self, ui, repo, url): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
self.url = url | self.url = url | ||||
def put(self, source, hash): | def put(self, source, hash): | ||||
'''Put source file into the store so it can be retrieved by hash.''' | '''Put source file into the store so it can be retrieved by hash.''' | ||||
raise NotImplementedError(b'abstract method') | raise NotImplementedError(b'abstract method') |
else: | else: | ||||
return f in standins | return f in standins | ||||
match.matchfn = matchfn | match.matchfn = matchfn | ||||
return match | return match | ||||
class automatedcommithook(object): | class automatedcommithook: | ||||
"""Stateful hook to update standins at the 1st commit of resuming | """Stateful hook to update standins at the 1st commit of resuming | ||||
For efficiency, updating standins in the working directory should | For efficiency, updating standins in the working directory should | ||||
be avoided while automated committing (like rebase, transplant and | be avoided while automated committing (like rebase, transplant and | ||||
so on), because they should be updated before committing. | so on), because they should be updated before committing. | ||||
But the 1st commit of resuming automated committing (e.g. ``rebase | But the 1st commit of resuming automated committing (e.g. ``rebase | ||||
--continue``) should update them, because largefiles may be | --continue``) should update them, because largefiles may be |
@eh.wrapcommand( | @eh.wrapcommand( | ||||
b'debugstate', | b'debugstate', | ||||
opts=[(b'', b'large', None, _(b'display largefiles dirstate'))], | opts=[(b'', b'large', None, _(b'display largefiles dirstate'))], | ||||
) | ) | ||||
def overridedebugstate(orig, ui, repo, *pats, **opts): | def overridedebugstate(orig, ui, repo, *pats, **opts): | ||||
large = opts.pop('large', False) | large = opts.pop('large', False) | ||||
if large: | if large: | ||||
class fakerepo(object): | class fakerepo: | ||||
dirstate = lfutil.openlfdirstate(ui, repo) | dirstate = lfutil.openlfdirstate(ui, repo) | ||||
orig(ui, fakerepo, *pats, **opts) | orig(ui, fakerepo, *pats, **opts) | ||||
else: | else: | ||||
orig(ui, repo, *pats, **opts) | orig(ui, repo, *pats, **opts) | ||||
# Before starting the manifest merge, merge.updates will call | # Before starting the manifest merge, merge.updates will call |
def __init__(self, ui, filename): | def __init__(self, ui, filename): | ||||
super(lfsuploadfile, self).__init__(ui, filename, b'rb') | super(lfsuploadfile, self).__init__(ui, filename, b'rb') | ||||
self.read = self._data.read | self.read = self._data.read | ||||
def _makeprogress(self): | def _makeprogress(self): | ||||
return None # progress is handled by the worker client | return None # progress is handled by the worker client | ||||
class local(object): | class local: | ||||
"""Local blobstore for large file contents. | """Local blobstore for large file contents. | ||||
This blobstore is used both as a cache and as a staging area for large blobs | This blobstore is used both as a cache and as a staging area for large blobs | ||||
to be uploaded to the remote blobstore. | to be uploaded to the remote blobstore. | ||||
""" | """ | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
fullpath = repo.svfs.join(b'lfs/objects') | fullpath = repo.svfs.join(b'lfs/objects') | ||||
code, | code, | ||||
encoding.strfromlocal(msg), | encoding.strfromlocal(msg), | ||||
headers, | headers, | ||||
fp, | fp, | ||||
) | ) | ||||
return None | return None | ||||
class _gitlfsremote(object): | class _gitlfsremote: | ||||
def __init__(self, repo, url): | def __init__(self, repo, url): | ||||
ui = repo.ui | ui = repo.ui | ||||
self.ui = ui | self.ui = ui | ||||
baseurl, authinfo = url.authinfo() | baseurl, authinfo = url.authinfo() | ||||
self.baseurl = baseurl.rstrip(b'/') | self.baseurl = baseurl.rstrip(b'/') | ||||
useragent = repo.ui.config(b'experimental', b'lfs.user-agent') | useragent = repo.ui.config(b'experimental', b'lfs.user-agent') | ||||
if not useragent: | if not useragent: | ||||
useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version() | useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version() | ||||
# copied from mercurial/httppeer.py | # copied from mercurial/httppeer.py | ||||
urlopener = getattr(self, 'urlopener', None) | urlopener = getattr(self, 'urlopener', None) | ||||
if urlopener: | if urlopener: | ||||
for h in urlopener.handlers: | for h in urlopener.handlers: | ||||
h.close() | h.close() | ||||
getattr(h, "close_all", lambda: None)() | getattr(h, "close_all", lambda: None)() | ||||
class _dummyremote(object): | class _dummyremote: | ||||
"""Dummy store storing blobs to temp directory.""" | """Dummy store storing blobs to temp directory.""" | ||||
def __init__(self, repo, url): | def __init__(self, repo, url): | ||||
fullpath = repo.vfs.join(b'lfs', url.path) | fullpath = repo.vfs.join(b'lfs', url.path) | ||||
self.vfs = lfsvfs(fullpath) | self.vfs = lfsvfs(fullpath) | ||||
def writebatch(self, pointers, fromstore): | def writebatch(self, pointers, fromstore): | ||||
for p in _deduplicate(pointers): | for p in _deduplicate(pointers): | ||||
content = fromstore.read(p.oid(), verify=True) | content = fromstore.read(p.oid(), verify=True) | ||||
with self.vfs(p.oid(), b'wb', atomictemp=True) as fp: | with self.vfs(p.oid(), b'wb', atomictemp=True) as fp: | ||||
fp.write(content) | fp.write(content) | ||||
def readbatch(self, pointers, tostore): | def readbatch(self, pointers, tostore): | ||||
for p in _deduplicate(pointers): | for p in _deduplicate(pointers): | ||||
with self.vfs(p.oid(), b'rb') as fp: | with self.vfs(p.oid(), b'rb') as fp: | ||||
tostore.download(p.oid(), fp, None) | tostore.download(p.oid(), fp, None) | ||||
class _nullremote(object): | class _nullremote: | ||||
"""Null store storing blobs to /dev/null.""" | """Null store storing blobs to /dev/null.""" | ||||
def __init__(self, repo, url): | def __init__(self, repo, url): | ||||
pass | pass | ||||
def writebatch(self, pointers, fromstore): | def writebatch(self, pointers, fromstore): | ||||
pass | pass | ||||
def readbatch(self, pointers, tostore): | def readbatch(self, pointers, tostore): | ||||
pass | pass | ||||
class _promptremote(object): | class _promptremote: | ||||
"""Prompt user to set lfs.url when accessed.""" | """Prompt user to set lfs.url when accessed.""" | ||||
def __init__(self, repo, url): | def __init__(self, repo, url): | ||||
pass | pass | ||||
def writebatch(self, pointers, fromstore, ui=None): | def writebatch(self, pointers, fromstore, ui=None): | ||||
self._prompt() | self._prompt() | ||||
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | ||||
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | ||||
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
class processlogger(object): | class processlogger: | ||||
"""Map log events to external commands | """Map log events to external commands | ||||
Arguments are passed on as environment variables. | Arguments are passed on as environment variables. | ||||
""" | """ | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self._scripts = dict(ui.configitems(b'logtoprocess')) | self._scripts = dict(ui.configitems(b'logtoprocess')) | ||||
) | ) | ||||
# force load strip extension formerly included in mq and import some utility | # force load strip extension formerly included in mq and import some utility | ||||
try: | try: | ||||
extensions.find(b'strip') | extensions.find(b'strip') | ||||
except KeyError: | except KeyError: | ||||
# note: load is lazy so we could avoid the try-except, | # note: load is lazy so we could avoid the try-except, | ||||
# but I (marmoute) prefer this explicit code. | # but I (marmoute) prefer this explicit code. | ||||
class dummyui(object): | class dummyui: | ||||
def debug(self, msg): | def debug(self, msg): | ||||
pass | pass | ||||
def log(self, event, msgfmt, *msgargs, **opts): | def log(self, event, msgfmt, *msgargs, **opts): | ||||
pass | pass | ||||
extensions.load(dummyui(), b'strip', b'') | extensions.load(dummyui(), b'strip', b'') | ||||
return inclsubs | return inclsubs | ||||
# Patch names looks like unix-file names. | # Patch names looks like unix-file names. | ||||
# They must be joinable with queue directory and result in the patch path. | # They must be joinable with queue directory and result in the patch path. | ||||
normname = util.normpath | normname = util.normpath | ||||
class statusentry(object): | class statusentry: | ||||
def __init__(self, node, name): | def __init__(self, node, name): | ||||
self.node, self.name = node, name | self.node, self.name = node, name | ||||
def __bytes__(self): | def __bytes__(self): | ||||
return hex(self.node) + b':' + self.name | return hex(self.node) + b':' + self.name | ||||
__str__ = encoding.strmethod(__bytes__) | __str__ = encoding.strmethod(__bytes__) | ||||
__repr__ = encoding.strmethod(__bytes__) | __repr__ = encoding.strmethod(__bytes__) | ||||
lines.insert(i, b'') | lines.insert(i, b'') | ||||
if i < bestpos: | if i < bestpos: | ||||
bestpos = i | bestpos = i | ||||
break | break | ||||
lines.insert(bestpos, b'%s: %s' % (header, value)) | lines.insert(bestpos, b'%s: %s' % (header, value)) | ||||
return lines | return lines | ||||
class patchheader(object): | class patchheader: | ||||
def __init__(self, pf, plainmode=False): | def __init__(self, pf, plainmode=False): | ||||
def eatdiff(lines): | def eatdiff(lines): | ||||
while lines: | while lines: | ||||
l = lines[-1] | l = lines[-1] | ||||
if ( | if ( | ||||
l.startswith(b"diff -") | l.startswith(b"diff -") | ||||
or l.startswith(b"Index:") | or l.startswith(b"Index:") | ||||
or l.startswith(b"===========") | or l.startswith(b"===========") | ||||
repo.ui.setconfig(b'ui', b'allowemptycommit', True) | repo.ui.setconfig(b'ui', b'allowemptycommit', True) | ||||
return repo.commit(*args, **kwargs) | return repo.commit(*args, **kwargs) | ||||
class AbortNoCleanup(error.Abort): | class AbortNoCleanup(error.Abort): | ||||
pass | pass | ||||
class queue(object): | class queue: | ||||
def __init__(self, ui, baseui, path, patchdir=None): | def __init__(self, ui, baseui, path, patchdir=None): | ||||
self.basepath = path | self.basepath = path | ||||
try: | try: | ||||
with open(os.path.join(path, b'patches.queue'), 'rb') as fh: | with open(os.path.join(path, b'patches.queue'), 'rb') as fh: | ||||
cur = fh.read().rstrip() | cur = fh.read().rstrip() | ||||
if not cur: | if not cur: | ||||
curpath = os.path.join(path, b'patches') | curpath = os.path.join(path, b'patches') |
summary: {desc|firstline} | summary: {desc|firstline} | ||||
''' | ''' | ||||
deftemplates = { | deftemplates = { | ||||
b'changegroup': multiple_template, | b'changegroup': multiple_template, | ||||
} | } | ||||
class notifier(object): | class notifier: | ||||
'''email notification class.''' | '''email notification class.''' | ||||
def __init__(self, ui, repo, hooktype): | def __init__(self, ui, repo, hooktype): | ||||
self.ui = ui | self.ui = ui | ||||
cfg = self.ui.config(b'notify', b'config') | cfg = self.ui.config(b'notify', b'config') | ||||
if cfg: | if cfg: | ||||
self.ui.readconfig(cfg, sections=[b'usersubs', b'reposubs']) | self.ui.readconfig(cfg, sections=[b'usersubs', b'reposubs']) | ||||
self.repo = repo | self.repo = repo |
output = util.stringio() | output = util.stringio() | ||||
for chunk, _label in patch.diffui( | for chunk, _label in patch.diffui( | ||||
ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts | ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts | ||||
): | ): | ||||
output.write(chunk) | output.write(chunk) | ||||
return output.getvalue() | return output.getvalue() | ||||
class DiffChangeType(object): | class DiffChangeType: | ||||
ADD = 1 | ADD = 1 | ||||
CHANGE = 2 | CHANGE = 2 | ||||
DELETE = 3 | DELETE = 3 | ||||
MOVE_AWAY = 4 | MOVE_AWAY = 4 | ||||
COPY_AWAY = 5 | COPY_AWAY = 5 | ||||
MOVE_HERE = 6 | MOVE_HERE = 6 | ||||
COPY_HERE = 7 | COPY_HERE = 7 | ||||
MULTICOPY = 8 | MULTICOPY = 8 | ||||
class DiffFileType(object): | class DiffFileType: | ||||
TEXT = 1 | TEXT = 1 | ||||
IMAGE = 2 | IMAGE = 2 | ||||
BINARY = 3 | BINARY = 3 | ||||
@attr.s | @attr.s | ||||
class phabhunk(dict): | class phabhunk(dict): | ||||
"""Represents a Differential hunk, which is owned by a Differential change""" | """Represents a Differential hunk, which is owned by a Differential change""" | ||||
oldOffset = attr.ib(default=0) # camelcase-required | oldOffset = attr.ib(default=0) # camelcase-required | ||||
oldLength = attr.ib(default=0) # camelcase-required | oldLength = attr.ib(default=0) # camelcase-required | ||||
newOffset = attr.ib(default=0) # camelcase-required | newOffset = attr.ib(default=0) # camelcase-required | ||||
newLength = attr.ib(default=0) # camelcase-required | newLength = attr.ib(default=0) # camelcase-required | ||||
corpus = attr.ib(default='') | corpus = attr.ib(default='') | ||||
# These get added to the phabchange's equivalents | # These get added to the phabchange's equivalents | ||||
addLines = attr.ib(default=0) # camelcase-required | addLines = attr.ib(default=0) # camelcase-required | ||||
delLines = attr.ib(default=0) # camelcase-required | delLines = attr.ib(default=0) # camelcase-required | ||||
@attr.s | @attr.s | ||||
class phabchange(object): | class phabchange: | ||||
"""Represents a Differential change, owns Differential hunks and owned by a | """Represents a Differential change, owns Differential hunks and owned by a | ||||
Differential diff. Each one represents one file in a diff. | Differential diff. Each one represents one file in a diff. | ||||
""" | """ | ||||
currentPath = attr.ib(default=None) # camelcase-required | currentPath = attr.ib(default=None) # camelcase-required | ||||
oldPath = attr.ib(default=None) # camelcase-required | oldPath = attr.ib(default=None) # camelcase-required | ||||
awayPaths = attr.ib(default=attr.Factory(list)) # camelcase-required | awayPaths = attr.ib(default=attr.Factory(list)) # camelcase-required | ||||
metadata = attr.ib(default=attr.Factory(dict)) | metadata = attr.ib(default=attr.Factory(dict)) | ||||
# It's useful to include these stats since the Phab web UI shows them, | # It's useful to include these stats since the Phab web UI shows them, | ||||
# and uses them to estimate how large a change a Revision is. Also used | # and uses them to estimate how large a change a Revision is. Also used | ||||
# in email subjects for the [+++--] bit. | # in email subjects for the [+++--] bit. | ||||
self.addLines += hunk.addLines | self.addLines += hunk.addLines | ||||
self.delLines += hunk.delLines | self.delLines += hunk.delLines | ||||
@attr.s | @attr.s | ||||
class phabdiff(object): | class phabdiff: | ||||
"""Represents a Differential diff, owns Differential changes. Corresponds | """Represents a Differential diff, owns Differential changes. Corresponds | ||||
to a commit. | to a commit. | ||||
""" | """ | ||||
# Doesn't seem to be any reason to send this (output of uname -n) | # Doesn't seem to be any reason to send this (output of uname -n) | ||||
sourceMachine = attr.ib(default=b'') # camelcase-required | sourceMachine = attr.ib(default=b'') # camelcase-required | ||||
sourcePath = attr.ib(default=b'/') # camelcase-required | sourcePath = attr.ib(default=b'/') # camelcase-required | ||||
sourceControlBaseRevision = attr.ib(default=b'0' * 40) # camelcase-required | sourceControlBaseRevision = attr.ib(default=b'0' * 40) # camelcase-required |
def _ctxdesc(ctx): | def _ctxdesc(ctx): | ||||
"""short description for a context""" | """short description for a context""" | ||||
return cmdutil.format_changeset_summary( | return cmdutil.format_changeset_summary( | ||||
ctx.repo().ui, ctx, command=b'rebase' | ctx.repo().ui, ctx, command=b'rebase' | ||||
) | ) | ||||
class rebaseruntime(object): | class rebaseruntime: | ||||
"""This class is a container for rebase runtime state""" | """This class is a container for rebase runtime state""" | ||||
def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None): | def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None): | ||||
if opts is None: | if opts is None: | ||||
opts = {} | opts = {} | ||||
# prepared: whether we have rebasestate prepared or not. Currently it | # prepared: whether we have rebasestate prepared or not. Currently it | ||||
# decides whether "self.repo" is unfiltered or not. | # decides whether "self.repo" is unfiltered or not. |
] | ] | ||||
RE_DIRECTIVE = re.compile(br'^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$') | RE_DIRECTIVE = re.compile(br'^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$') | ||||
RE_ISSUE = br'\bissue ?[0-9]{4,6}(?![0-9])\b' | RE_ISSUE = br'\bissue ?[0-9]{4,6}(?![0-9])\b' | ||||
BULLET_SECTION = _(b'Other Changes') | BULLET_SECTION = _(b'Other Changes') | ||||
class parsedreleasenotes(object): | class parsedreleasenotes: | ||||
def __init__(self): | def __init__(self): | ||||
self.sections = {} | self.sections = {} | ||||
def __contains__(self, section): | def __contains__(self, section): | ||||
return section in self.sections | return section in self.sections | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(sorted(self.sections)) | return iter(sorted(self.sections)) | ||||
continue | continue | ||||
if similar(ui, existingnotes, incoming_str): | if similar(ui, existingnotes, incoming_str): | ||||
continue | continue | ||||
self.addnontitleditem(section, paragraphs) | self.addnontitleditem(section, paragraphs) | ||||
class releasenotessections(object): | class releasenotessections: | ||||
def __init__(self, ui, repo=None): | def __init__(self, ui, repo=None): | ||||
if repo: | if repo: | ||||
sections = util.sortdict(DEFAULT_SECTIONS) | sections = util.sortdict(DEFAULT_SECTIONS) | ||||
custom_sections = getcustomadmonitions(repo) | custom_sections = getcustomadmonitions(repo) | ||||
if custom_sections: | if custom_sections: | ||||
sections.update(custom_sections) | sections.update(custom_sections) | ||||
self._sections = list(sections.items()) | self._sections = list(sections.items()) | ||||
else: | else: |
# With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening. | # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening. | ||||
# The 'e' flag will be ignored on older versions of glibc. | # The 'e' flag will be ignored on older versions of glibc. | ||||
# Python 3 can't handle the 'e' flag. | # Python 3 can't handle the 'e' flag. | ||||
PACKOPENMODE = b'rbe' | PACKOPENMODE = b'rbe' | ||||
else: | else: | ||||
PACKOPENMODE = b'rb' | PACKOPENMODE = b'rb' | ||||
class _cachebackedpacks(object): | class _cachebackedpacks: | ||||
def __init__(self, packs, cachesize): | def __init__(self, packs, cachesize): | ||||
self._packs = set(packs) | self._packs = set(packs) | ||||
self._lrucache = util.lrucachedict(cachesize) | self._lrucache = util.lrucachedict(cachesize) | ||||
self._lastpack = None | self._lastpack = None | ||||
# Avoid cold start of the cache by populating the most recent packs | # Avoid cold start of the cache by populating the most recent packs | ||||
# in the cache. | # in the cache. | ||||
for i in reversed(range(min(cachesize, len(packs)))): | for i in reversed(range(min(cachesize, len(packs)))): | ||||
for pack in self._packs - cachedpacks: | for pack in self._packs - cachedpacks: | ||||
self._lastpack = pack | self._lastpack = pack | ||||
yield pack | yield pack | ||||
# Data not found in any pack. | # Data not found in any pack. | ||||
self._lastpack = None | self._lastpack = None | ||||
class basepackstore(object): | class basepackstore: | ||||
# Default cache size limit for the pack files. | # Default cache size limit for the pack files. | ||||
DEFAULTCACHESIZE = 100 | DEFAULTCACHESIZE = 100 | ||||
def __init__(self, ui, path): | def __init__(self, ui, path): | ||||
self.ui = ui | self.ui = ui | ||||
self.path = path | self.path = path | ||||
# lastrefesh is 0 so we'll immediately check for new packs on the first | # lastrefesh is 0 so we'll immediately check for new packs on the first | ||||
if filepath not in previous: | if filepath not in previous: | ||||
newpack = self.getpack(filepath) | newpack = self.getpack(filepath) | ||||
newpacks.append(newpack) | newpacks.append(newpack) | ||||
self.packs.add(newpack) | self.packs.add(newpack) | ||||
return newpacks | return newpacks | ||||
class versionmixin(object): | class versionmixin: | ||||
# Mix-in for classes with multiple supported versions | # Mix-in for classes with multiple supported versions | ||||
VERSION = None | VERSION = None | ||||
SUPPORTED_VERSIONS = [2] | SUPPORTED_VERSIONS = [2] | ||||
def _checkversion(self, version): | def _checkversion(self, version): | ||||
if version in self.SUPPORTED_VERSIONS: | if version in self.SUPPORTED_VERSIONS: | ||||
if self.VERSION is None: | if self.VERSION is None: | ||||
# only affect this instance | # only affect this instance | ||||
# <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8 | # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8 | ||||
# <unused: 7 bit> # future use (compression, delta format, etc) | # <unused: 7 bit> # future use (compression, delta format, etc) | ||||
config = 0 | config = 0 | ||||
if indexparams.fanoutprefix == LARGEFANOUTPREFIX: | if indexparams.fanoutprefix == LARGEFANOUTPREFIX: | ||||
config = 0b10000000 | config = 0b10000000 | ||||
self.idxfp.write(struct.pack(b'!BB', self.VERSION, config)) | self.idxfp.write(struct.pack(b'!BB', self.VERSION, config)) | ||||
class indexparams(object): | class indexparams: | ||||
__slots__ = ( | __slots__ = ( | ||||
'fanoutprefix', | 'fanoutprefix', | ||||
'fanoutstruct', | 'fanoutstruct', | ||||
'fanoutcount', | 'fanoutcount', | ||||
'fanoutsize', | 'fanoutsize', | ||||
'indexstart', | 'indexstart', | ||||
) | ) | ||||
) | ) | ||||
from mercurial.utils import hashutil | from mercurial.utils import hashutil | ||||
from . import ( | from . import ( | ||||
constants, | constants, | ||||
shallowutil, | shallowutil, | ||||
) | ) | ||||
class basestore(object): | class basestore: | ||||
def __init__(self, repo, path, reponame, shared=False): | def __init__(self, repo, path, reponame, shared=False): | ||||
"""Creates a remotefilelog store object for the given repo name. | """Creates a remotefilelog store object for the given repo name. | ||||
`path` - The file path where this store keeps its data | `path` - The file path where this store keeps its data | ||||
`reponame` - The name of the repo. This is used to partition data from | `reponame` - The name of the repo. This is used to partition data from | ||||
many repos. | many repos. | ||||
`shared` - True if this store is a shared cache of data from the central | `shared` - True if this store is a shared cache of data from the central | ||||
server, for many repos on this machine. False means this store is for | server, for many repos on this machine. False means this store is for | ||||
removed, | removed, | ||||
count, | count, | ||||
float(originalsize) / 1024.0 / 1024.0 / 1024.0, | float(originalsize) / 1024.0 / 1024.0 / 1024.0, | ||||
float(size) / 1024.0 / 1024.0 / 1024.0, | float(size) / 1024.0 / 1024.0 / 1024.0, | ||||
) | ) | ||||
) | ) | ||||
class baseunionstore(object): | class baseunionstore: | ||||
def __init__(self, *args, **kwargs): | def __init__(self, *args, **kwargs): | ||||
# If one of the functions that iterates all of the stores is about to | # If one of the functions that iterates all of the stores is about to | ||||
# throw a KeyError, try this many times with a full refresh between | # throw a KeyError, try this many times with a full refresh between | ||||
# attempts. A repack operation may have moved data from one store to | # attempts. A repack operation may have moved data from one store to | ||||
# another while we were running. | # another while we were running. | ||||
self.numattempts = kwargs.get('numretries', 0) + 1 | self.numattempts = kwargs.get('numretries', 0) + 1 | ||||
# If not-None, call this function on every retry and if the attempts are | # If not-None, call this function on every retry and if the attempts are | ||||
# exhausted. | # exhausted. |
hg, | hg, | ||||
sshpeer, | sshpeer, | ||||
util, | util, | ||||
) | ) | ||||
_sshv1peer = sshpeer.sshv1peer | _sshv1peer = sshpeer.sshv1peer | ||||
class connectionpool(object): | class connectionpool: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self._repo = repo | self._repo = repo | ||||
self._pool = dict() | self._pool = dict() | ||||
def get(self, path): | def get(self, path): | ||||
pathpool = self._pool.get(path) | pathpool = self._pool.get(path) | ||||
if pathpool is None: | if pathpool is None: | ||||
pathpool = list() | pathpool = list() | ||||
def close(self): | def close(self): | ||||
for pathpool in self._pool.values(): | for pathpool in self._pool.values(): | ||||
for conn in pathpool: | for conn in pathpool: | ||||
conn.close() | conn.close() | ||||
del pathpool[:] | del pathpool[:] | ||||
class connection(object): | class connection: | ||||
def __init__(self, pool, peer): | def __init__(self, pool, peer): | ||||
self._pool = pool | self._pool = pool | ||||
self.peer = peer | self.peer = peer | ||||
def __enter__(self): | def __enter__(self): | ||||
return self | return self | ||||
def __exit__(self, type, value, traceback): | def __exit__(self, type, value, traceback): |
) | ) | ||||
from . import ( | from . import ( | ||||
basestore, | basestore, | ||||
constants, | constants, | ||||
shallowutil, | shallowutil, | ||||
) | ) | ||||
class ChainIndicies(object): | class ChainIndicies: | ||||
"""A static class for easy reference to the delta chain indicies.""" | """A static class for easy reference to the delta chain indicies.""" | ||||
# The filename of this revision delta | # The filename of this revision delta | ||||
NAME = 0 | NAME = 0 | ||||
# The mercurial file node for this revision delta | # The mercurial file node for this revision delta | ||||
NODE = 1 | NODE = 1 | ||||
# The filename of the delta base's revision. This is useful when delta | # The filename of the delta base's revision. This is useful when delta | ||||
# between different files (like in the case of a move or copy, we can delta | # between different files (like in the case of a move or copy, we can delta | ||||
def _updatemetacache(self, node, size, flags): | def _updatemetacache(self, node, size, flags): | ||||
self._sanitizemetacache() | self._sanitizemetacache() | ||||
if node == self._threaddata.metacache[0]: | if node == self._threaddata.metacache[0]: | ||||
return | return | ||||
meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size} | meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size} | ||||
self._threaddata.metacache = (node, meta) | self._threaddata.metacache = (node, meta) | ||||
class remotecontentstore(object): | class remotecontentstore: | ||||
def __init__(self, ui, fileservice, shared): | def __init__(self, ui, fileservice, shared): | ||||
self._fileservice = fileservice | self._fileservice = fileservice | ||||
# type(shared) is usually remotefilelogcontentstore | # type(shared) is usually remotefilelogcontentstore | ||||
self._shared = shared | self._shared = shared | ||||
def get(self, name, node): | def get(self, name, node): | ||||
self._fileservice.prefetch( | self._fileservice.prefetch( | ||||
[(name, hex(node))], force=True, fetchdata=True | [(name, hex(node))], force=True, fetchdata=True | ||||
def getmissing(self, keys): | def getmissing(self, keys): | ||||
return keys | return keys | ||||
def markledger(self, ledger, options=None): | def markledger(self, ledger, options=None): | ||||
pass | pass | ||||
class manifestrevlogstore(object): | class manifestrevlogstore: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
self._store = repo.store | self._store = repo.store | ||||
self._svfs = repo.svfs | self._svfs = repo.svfs | ||||
self._revlogs = dict() | self._revlogs = dict() | ||||
self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i') | self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i') | ||||
self._repackstartlinkrev = 0 | self._repackstartlinkrev = 0 | ||||
def get(self, name, node): | def get(self, name, node): |
fctxs = sorted(fctxs, key=lambda x: x.linkrev()) | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) | ||||
# add to revlog | # add to revlog | ||||
temppath = repo.sjoin(b'data/temprevlog.i') | temppath = repo.sjoin(b'data/temprevlog.i') | ||||
if os.path.exists(temppath): | if os.path.exists(temppath): | ||||
os.remove(temppath) | os.remove(temppath) | ||||
r = filelog.filelog(repo.svfs, b'temprevlog') | r = filelog.filelog(repo.svfs, b'temprevlog') | ||||
class faket(object): | class faket: | ||||
def add(self, a, b, c): | def add(self, a, b, c): | ||||
pass | pass | ||||
t = faket() | t = faket() | ||||
for fctx in fctxs: | for fctx in fctxs: | ||||
if fctx.node() not in repo: | if fctx.node() not in repo: | ||||
continue | continue | ||||
supertype = super(remotefilepeer, self) | supertype = super(remotefilepeer, self) | ||||
if not util.safehasattr(supertype, '_sendrequest'): | if not util.safehasattr(supertype, '_sendrequest'): | ||||
self._updatecallstreamopts(command, pycompat.byteskwargs(opts)) | self._updatecallstreamopts(command, pycompat.byteskwargs(opts)) | ||||
return super(remotefilepeer, self)._callstream(command, **opts) | return super(remotefilepeer, self)._callstream(command, **opts) | ||||
peer.__class__ = remotefilepeer | peer.__class__ = remotefilepeer | ||||
class cacheconnection(object): | class cacheconnection: | ||||
"""The connection for communicating with the remote cache. Performs | """The connection for communicating with the remote cache. Performs | ||||
gets and sets by communicating with an external process that has the | gets and sets by communicating with an external process that has the | ||||
cache-specific implementation. | cache-specific implementation. | ||||
""" | """ | ||||
def __init__(self): | def __init__(self): | ||||
self.pipeo = self.pipei = self.pipee = None | self.pipeo = self.pipei = self.pipee = None | ||||
self.subprocess = None | self.subprocess = None | ||||
progresstick() | progresstick() | ||||
writerthread.join() | writerthread.join() | ||||
# End the command | # End the command | ||||
pipeo.write(b'\n') | pipeo.write(b'\n') | ||||
pipeo.flush() | pipeo.flush() | ||||
class fileserverclient(object): | class fileserverclient: | ||||
"""A client for requesting files from the remote file server.""" | """A client for requesting files from the remote file server.""" | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
ui = repo.ui | ui = repo.ui | ||||
self.repo = repo | self.repo = repo | ||||
self.ui = ui | self.ui = ui | ||||
self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess") | self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess") | ||||
if self.cacheprocess: | if self.cacheprocess: | ||||
if self.cacheprocess: | if self.cacheprocess: | ||||
cmd = b"%s %s" % (self.cacheprocess, self.writedata._path) | cmd = b"%s %s" % (self.cacheprocess, self.writedata._path) | ||||
self.remotecache.connect(cmd) | self.remotecache.connect(cmd) | ||||
else: | else: | ||||
# If no cache process is specified, we fake one that always | # If no cache process is specified, we fake one that always | ||||
# returns cache misses. This enables tests to run easily | # returns cache misses. This enables tests to run easily | ||||
# and may eventually allow us to be a drop in replacement | # and may eventually allow us to be a drop in replacement | ||||
# for the largefiles extension. | # for the largefiles extension. | ||||
class simplecache(object): | class simplecache: | ||||
def __init__(self): | def __init__(self): | ||||
self.missingids = [] | self.missingids = [] | ||||
self.connected = True | self.connected = True | ||||
def close(self): | def close(self): | ||||
pass | pass | ||||
def request(self, value, flush=True): | def request(self, value, flush=True): |
return self.getancestors(name, node)[node] | return self.getancestors(name, node)[node] | ||||
def add(self, name, node, parents, linknode): | def add(self, name, node, parents, linknode): | ||||
raise RuntimeError( | raise RuntimeError( | ||||
b"cannot add metadata only to remotefilelog metadatastore" | b"cannot add metadata only to remotefilelog metadatastore" | ||||
) | ) | ||||
class remotemetadatastore(object): | class remotemetadatastore: | ||||
def __init__(self, ui, fileservice, shared): | def __init__(self, ui, fileservice, shared): | ||||
self._fileservice = fileservice | self._fileservice = fileservice | ||||
self._shared = shared | self._shared = shared | ||||
def getancestors(self, name, node, known=None): | def getancestors(self, name, node, known=None): | ||||
self._fileservice.prefetch( | self._fileservice.prefetch( | ||||
[(name, hex(node))], force=True, fetchdata=False, fetchhistory=True | [(name, hex(node))], force=True, fetchdata=False, fetchhistory=True | ||||
) | ) |
from . import ( | from . import ( | ||||
constants, | constants, | ||||
fileserverclient, | fileserverclient, | ||||
shallowutil, | shallowutil, | ||||
) | ) | ||||
class remotefilelognodemap(object): | class remotefilelognodemap: | ||||
def __init__(self, filename, store): | def __init__(self, filename, store): | ||||
self._filename = filename | self._filename = filename | ||||
self._store = store | self._store = store | ||||
def __contains__(self, node): | def __contains__(self, node): | ||||
missing = self._store.getmissing([(self._filename, node)]) | missing = self._store.getmissing([(self._filename, node)]) | ||||
return not bool(missing) | return not bool(missing) | ||||
def __get__(self, node): | def __get__(self, node): | ||||
if node not in self: | if node not in self: | ||||
raise KeyError(node) | raise KeyError(node) | ||||
return node | return node | ||||
class remotefilelog(object): | class remotefilelog: | ||||
_generaldelta = True | _generaldelta = True | ||||
_flagserrorclass = error.RevlogError | _flagserrorclass = error.RevlogError | ||||
def __init__(self, opener, path, repo): | def __init__(self, opener, path, repo): | ||||
self.opener = opener | self.opener = opener | ||||
self.filename = path | self.filename = path | ||||
self.repo = repo | self.repo = repo |
)(getflogheads) | )(getflogheads) | ||||
wireprotov1server.wireprotocommand( | wireprotov1server.wireprotocommand( | ||||
b'x_rfl_getfiles', b'', permission=b'pull' | b'x_rfl_getfiles', b'', permission=b'pull' | ||||
)(getfiles) | )(getfiles) | ||||
wireprotov1server.wireprotocommand( | wireprotov1server.wireprotocommand( | ||||
b'x_rfl_getfile', b'file node', permission=b'pull' | b'x_rfl_getfile', b'file node', permission=b'pull' | ||||
)(getfile) | )(getfile) | ||||
class streamstate(object): | class streamstate: | ||||
match = None | match = None | ||||
shallowremote = False | shallowremote = False | ||||
noflatmf = False | noflatmf = False | ||||
state = streamstate() | state = streamstate() | ||||
def stream_out_shallow(repo, proto, other): | def stream_out_shallow(repo, proto, other): | ||||
includepattern = None | includepattern = None |
else: | else: | ||||
# m is a manifest object | # m is a manifest object | ||||
for filename, filenode in m.items(): | for filename, filenode in m.items(): | ||||
keepkeys.add(keyfn(filename, filenode)) | keepkeys.add(keyfn(filename, filenode)) | ||||
return keepkeys | return keepkeys | ||||
class repacker(object): | class repacker: | ||||
"""Class for orchestrating the repack of data and history information into a | """Class for orchestrating the repack of data and history information into a | ||||
new format. | new format. | ||||
""" | """ | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
repo, | repo, | ||||
data, | data, | ||||
if p2 != self.repo.nullid: | if p2 != self.repo.nullid: | ||||
parents.append(p2) | parents.append(p2) | ||||
return parents | return parents | ||||
sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | ||||
return sortednodes | return sortednodes | ||||
class repackledger(object): | class repackledger: | ||||
"""Storage for all the bookkeeping that happens during a repack. It contains | """Storage for all the bookkeeping that happens during a repack. It contains | ||||
the list of revisions being repacked, what happened to each revision, and | the list of revisions being repacked, what happened to each revision, and | ||||
which source store contained which revision originally (for later cleanup). | which source store contained which revision originally (for later cleanup). | ||||
""" | """ | ||||
def __init__(self): | def __init__(self): | ||||
self.entries = {} | self.entries = {} | ||||
self.sources = {} | self.sources = {} | ||||
self.entries[key] = value | self.entries[key] = value | ||||
return value | return value | ||||
def addcreated(self, value): | def addcreated(self, value): | ||||
self.created.add(value) | self.created.add(value) | ||||
class repackentry(object): | class repackentry: | ||||
"""Simple class representing a single revision entry in the repackledger.""" | """Simple class representing a single revision entry in the repackledger.""" | ||||
__slots__ = ( | __slots__ = ( | ||||
'filename', | 'filename', | ||||
'node', | 'node', | ||||
'datasource', | 'datasource', | ||||
'historysource', | 'historysource', | ||||
'datarepacked', | 'datarepacked', |
self._load() | self._load() | ||||
for k, vtup in self.potentialentries.items(): | for k, vtup in self.potentialentries.items(): | ||||
yield (k, [bin(vtup[0])]) | yield (k, [bin(vtup[0])]) | ||||
items = iteritems | items = iteritems | ||||
class remotenames(object): | class remotenames: | ||||
""" | """ | ||||
This class encapsulates all the remotenames state. It also contains | This class encapsulates all the remotenames state. It also contains | ||||
methods to access that state in convenient ways. Remotenames are lazy | methods to access that state in convenient ways. Remotenames are lazy | ||||
loaded. Whenever client code needs to ensure the freshest copy of | loaded. Whenever client code needs to ensure the freshest copy of | ||||
remotenames, use the `clearnames` method to force an eventual load. | remotenames, use the `clearnames` method to force an eventual load. | ||||
""" | """ | ||||
def __init__(self, repo, *args): | def __init__(self, repo, *args): |
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | ||||
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
_partre = re.compile(br'{(\d+)\}') | _partre = re.compile(br'{(\d+)\}') | ||||
class ShortRepository(object): | class ShortRepository: | ||||
def __init__(self, url, scheme, templater): | def __init__(self, url, scheme, templater): | ||||
self.scheme = scheme | self.scheme = scheme | ||||
self.templater = templater | self.templater = templater | ||||
self.url = url | self.url = url | ||||
try: | try: | ||||
self.parts = max(map(int, _partre.findall(self.url))) | self.parts = max(map(int, _partre.findall(self.url))) | ||||
except ValueError: | except ValueError: | ||||
self.parts = 0 | self.parts = 0 |
).fetchone()[0] | ).fetchone()[0] | ||||
class SQLiteStoreError(error.StorageError): | class SQLiteStoreError(error.StorageError): | ||||
pass | pass | ||||
@attr.s | @attr.s | ||||
class revisionentry(object): | class revisionentry: | ||||
rid = attr.ib() | rid = attr.ib() | ||||
rev = attr.ib() | rev = attr.ib() | ||||
node = attr.ib() | node = attr.ib() | ||||
p1rev = attr.ib() | p1rev = attr.ib() | ||||
p2rev = attr.ib() | p2rev = attr.ib() | ||||
p1node = attr.ib() | p1node = attr.ib() | ||||
p2node = attr.ib() | p2node = attr.ib() | ||||
linkrev = attr.ib() | linkrev = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
@interfaceutil.implementer(repository.irevisiondelta) | @interfaceutil.implementer(repository.irevisiondelta) | ||||
@attr.s(slots=True) | @attr.s(slots=True) | ||||
class sqliterevisiondelta(object): | class sqliterevisiondelta: | ||||
node = attr.ib() | node = attr.ib() | ||||
p1node = attr.ib() | p1node = attr.ib() | ||||
p2node = attr.ib() | p2node = attr.ib() | ||||
basenode = attr.ib() | basenode = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
baserevisionsize = attr.ib() | baserevisionsize = attr.ib() | ||||
revision = attr.ib() | revision = attr.ib() | ||||
delta = attr.ib() | delta = attr.ib() | ||||
sidedata = attr.ib() | sidedata = attr.ib() | ||||
protocol_flags = attr.ib() | protocol_flags = attr.ib() | ||||
linknode = attr.ib(default=None) | linknode = attr.ib(default=None) | ||||
@interfaceutil.implementer(repository.iverifyproblem) | @interfaceutil.implementer(repository.iverifyproblem) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class sqliteproblem(object): | class sqliteproblem: | ||||
warning = attr.ib(default=None) | warning = attr.ib(default=None) | ||||
error = attr.ib(default=None) | error = attr.ib(default=None) | ||||
node = attr.ib(default=None) | node = attr.ib(default=None) | ||||
@interfaceutil.implementer(repository.ifilestorage) | @interfaceutil.implementer(repository.ifilestorage) | ||||
class sqlitefilestore(object): | class sqlitefilestore: | ||||
"""Implements storage for an individual tracked path.""" | """Implements storage for an individual tracked path.""" | ||||
def __init__(self, db, path, compression): | def __init__(self, db, path, compression): | ||||
self.nullid = sha1nodeconstants.nullid | self.nullid = sha1nodeconstants.nullid | ||||
self._db = db | self._db = db | ||||
self._path = path | self._path = path | ||||
self._pathid = None | self._pathid = None | ||||
if createopts.get(b'shallowfilestore'): | if createopts.get(b'shallowfilestore'): | ||||
requirements.add(REQUIREMENT_SHALLOW_FILES) | requirements.add(REQUIREMENT_SHALLOW_FILES) | ||||
return requirements | return requirements | ||||
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | ||||
class sqlitefilestorage(object): | class sqlitefilestorage: | ||||
"""Repository file storage backed by SQLite.""" | """Repository file storage backed by SQLite.""" | ||||
def file(self, path): | def file(self, path): | ||||
if path[0] == b'/': | if path[0] == b'/': | ||||
path = path[1:] | path = path[1:] | ||||
if REQUIREMENT_ZSTD in self.requirements: | if REQUIREMENT_ZSTD in self.requirements: | ||||
compression = b'zstd' | compression = b'zstd' |
) | ) | ||||
configitem( | configitem( | ||||
b'transplant', | b'transplant', | ||||
b'log', | b'log', | ||||
default=None, | default=None, | ||||
) | ) | ||||
class transplantentry(object): | class transplantentry: | ||||
def __init__(self, lnode, rnode): | def __init__(self, lnode, rnode): | ||||
self.lnode = lnode | self.lnode = lnode | ||||
self.rnode = rnode | self.rnode = rnode | ||||
class transplants(object): | class transplants: | ||||
def __init__(self, path=None, transplantfile=None, opener=None): | def __init__(self, path=None, transplantfile=None, opener=None): | ||||
self.path = path | self.path = path | ||||
self.transplantfile = transplantfile | self.transplantfile = transplantfile | ||||
self.opener = opener | self.opener = opener | ||||
if not opener: | if not opener: | ||||
self.opener = vfsmod.vfs(self.path) | self.opener = vfsmod.vfs(self.path) | ||||
self.transplants = {} | self.transplants = {} | ||||
def remove(self, transplant): | def remove(self, transplant): | ||||
list = self.transplants.get(transplant.rnode) | list = self.transplants.get(transplant.rnode) | ||||
if list: | if list: | ||||
del list[list.index(transplant)] | del list[list.index(transplant)] | ||||
self.dirty = True | self.dirty = True | ||||
class transplanter(object): | class transplanter: | ||||
def __init__(self, ui, repo, opts): | def __init__(self, ui, repo, opts): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
self.path = repo.vfs.join(b'transplant') | self.path = repo.vfs.join(b'transplant') | ||||
self.opener = vfsmod.vfs(self.path) | self.opener = vfsmod.vfs(self.path) | ||||
self.transplants = transplants( | self.transplants = transplants( | ||||
self.path, b'transplants', opener=self.opener | self.path, b'transplants', opener=self.opener | ||||
) | ) |
class BadDomainNameCircular(BadDomainName): | class BadDomainNameCircular(BadDomainName): | ||||
pass | pass | ||||
# implementation classes | # implementation classes | ||||
class DNSEntry(object): | class DNSEntry: | ||||
"""A DNS entry""" | """A DNS entry""" | ||||
def __init__(self, name, type, clazz): | def __init__(self, name, type, clazz): | ||||
self.key = name.lower() | self.key = name.lower() | ||||
self.name = name | self.name = name | ||||
self.type = type | self.type = type | ||||
self.clazz = clazz & _CLASS_MASK | self.clazz = clazz & _CLASS_MASK | ||||
self.unique = (clazz & _CLASS_UNIQUE) != 0 | self.unique = (clazz & _CLASS_UNIQUE) != 0 | ||||
) | ) | ||||
return 0 | return 0 | ||||
def __repr__(self): | def __repr__(self): | ||||
"""String representation""" | """String representation""" | ||||
return self.toString(b"%s:%s" % (self.server, self.port)) | return self.toString(b"%s:%s" % (self.server, self.port)) | ||||
class DNSIncoming(object): | class DNSIncoming: | ||||
"""Object representation of an incoming DNS packet""" | """Object representation of an incoming DNS packet""" | ||||
def __init__(self, data): | def __init__(self, data): | ||||
"""Constructor from string holding bytes of packet""" | """Constructor from string holding bytes of packet""" | ||||
self.offset = 0 | self.offset = 0 | ||||
self.data = data | self.data = data | ||||
self.questions = [] | self.questions = [] | ||||
self.answers = [] | self.answers = [] | ||||
if next >= 0: | if next >= 0: | ||||
self.offset = next | self.offset = next | ||||
else: | else: | ||||
self.offset = off | self.offset = off | ||||
return result | return result | ||||
class DNSOutgoing(object): | class DNSOutgoing: | ||||
"""Object representation of an outgoing packet""" | """Object representation of an outgoing packet""" | ||||
def __init__(self, flags, multicast=1): | def __init__(self, flags, multicast=1): | ||||
self.finished = 0 | self.finished = 0 | ||||
self.id = 0 | self.id = 0 | ||||
self.multicast = multicast | self.multicast = multicast | ||||
self.flags = flags | self.flags = flags | ||||
self.names = {} | self.names = {} | ||||
self.insertShort(0, self.flags) | self.insertShort(0, self.flags) | ||||
if self.multicast: | if self.multicast: | ||||
self.insertShort(0, 0) | self.insertShort(0, 0) | ||||
else: | else: | ||||
self.insertShort(0, self.id) | self.insertShort(0, self.id) | ||||
return b''.join(self.data) | return b''.join(self.data) | ||||
class DNSCache(object): | class DNSCache: | ||||
"""A cache of DNS entries""" | """A cache of DNS entries""" | ||||
def __init__(self): | def __init__(self): | ||||
self.cache = {} | self.cache = {} | ||||
def add(self, entry): | def add(self, entry): | ||||
"""Adds an entry""" | """Adds an entry""" | ||||
try: | try: | ||||
self.condition.release() | self.condition.release() | ||||
def notify(self): | def notify(self): | ||||
self.condition.acquire() | self.condition.acquire() | ||||
self.condition.notify() | self.condition.notify() | ||||
self.condition.release() | self.condition.release() | ||||
class Listener(object): | class Listener: | ||||
"""A Listener is used by this module to listen on the multicast | """A Listener is used by this module to listen on the multicast | ||||
group to which DNS messages are sent, allowing the implementation | group to which DNS messages are sent, allowing the implementation | ||||
to cache information as it arrives. | to cache information as it arrives. | ||||
It requires registration with an Engine object in order to have | It requires registration with an Engine object in order to have | ||||
the read() method called when a socket is available for reading.""" | the read() method called when a socket is available for reading.""" | ||||
def __init__(self, zeroconf): | def __init__(self, zeroconf): | ||||
if len(self.list) > 0: | if len(self.list) > 0: | ||||
event = self.list.pop(0) | event = self.list.pop(0) | ||||
if event is not None: | if event is not None: | ||||
event(self.zeroconf) | event(self.zeroconf) | ||||
class ServiceInfo(object): | class ServiceInfo: | ||||
"""Service information""" | """Service information""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
type, | type, | ||||
name, | name, | ||||
address=None, | address=None, | ||||
port=None, | port=None, | ||||
if len(self.text) < 20: | if len(self.text) < 20: | ||||
result += self.text | result += self.text | ||||
else: | else: | ||||
result += self.text[:17] + b"..." | result += self.text[:17] + b"..." | ||||
result += b"]" | result += b"]" | ||||
return result | return result | ||||
class Zeroconf(object): | class Zeroconf: | ||||
"""Implementation of Zeroconf Multicast DNS Service Discovery | """Implementation of Zeroconf Multicast DNS Service Discovery | ||||
Supports registration, unregistration, queries and browsing. | Supports registration, unregistration, queries and browsing. | ||||
""" | """ | ||||
def __init__(self, bindaddress=None): | def __init__(self, bindaddress=None): | ||||
"""Creates an instance of the Zeroconf class, establishing | """Creates an instance of the Zeroconf class, establishing | ||||
multicast communications, listening and reaping threads.""" | multicast communications, listening and reaping threads.""" |
desc = name | desc = name | ||||
publish(name, desc, path, port) | publish(name, desc, path, port) | ||||
return httpd | return httpd | ||||
# listen | # listen | ||||
class listener(object): | class listener: | ||||
def __init__(self): | def __init__(self): | ||||
self.found = {} | self.found = {} | ||||
def removeService(self, server, type, name): | def removeService(self, server, type, name): | ||||
if repr(name) in self.found: | if repr(name) in self.found: | ||||
del self.found[repr(name)] | del self.found[repr(name)] | ||||
def addService(self, server, type, name): | def addService(self, server, type, name): |
import sys | import sys | ||||
import textwrap | import textwrap | ||||
try: | try: | ||||
import io | import io | ||||
except ImportError: | except ImportError: | ||||
# replacement of io.open() for python < 2.6 | # replacement of io.open() for python < 2.6 | ||||
# we use codecs instead | # we use codecs instead | ||||
class io(object): | class io: | ||||
@staticmethod | @staticmethod | ||||
def open(fpath, mode='r', encoding=None): | def open(fpath, mode='r', encoding=None): | ||||
return codecs.open(fpath, mode, encoding) | return codecs.open(fpath, mode, encoding) | ||||
# the default encoding to use when encoding cannot be detected | # the default encoding to use when encoding cannot be detected | ||||
default_encoding = 'utf-8' | default_encoding = 'utf-8' | ||||
""" | """ | ||||
return [] | return [] | ||||
# }}} | # }}} | ||||
# class _BaseEntry {{{ | # class _BaseEntry {{{ | ||||
class _BaseEntry(object): | class _BaseEntry: | ||||
""" | """ | ||||
Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes. | Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes. | ||||
This class should **not** be instanciated directly. | This class should **not** be instanciated directly. | ||||
""" | """ | ||||
def __init__(self, *args, **kwargs): | def __init__(self, *args, **kwargs): | ||||
""" | """ | ||||
Constructor, accepts the following keyword arguments: | Constructor, accepts the following keyword arguments: | ||||
def __hash__(self): | def __hash__(self): | ||||
return hash((self.msgid, self.msgstr)) | return hash((self.msgid, self.msgstr)) | ||||
# }}} | # }}} | ||||
# class _POFileParser {{{ | # class _POFileParser {{{ | ||||
class _POFileParser(object): | class _POFileParser: | ||||
""" | """ | ||||
A finite state machine to parse efficiently and correctly po | A finite state machine to parse efficiently and correctly po | ||||
file format. | file format. | ||||
""" | """ | ||||
def __init__(self, pofile, *args, **kwargs): | def __init__(self, pofile, *args, **kwargs): | ||||
""" | """ | ||||
Constructor. | Constructor. | ||||
# don't change the current state | # don't change the current state | ||||
return False | return False | ||||
# }}} | # }}} | ||||
# class _MOFileParser {{{ | # class _MOFileParser {{{ | ||||
class _MOFileParser(object): | class _MOFileParser: | ||||
""" | """ | ||||
A class to parse binary mo files. | A class to parse binary mo files. | ||||
""" | """ | ||||
def __init__(self, mofile, *args, **kwargs): | def __init__(self, mofile, *args, **kwargs): | ||||
""" | """ | ||||
Constructor. | Constructor. | ||||
gca = commonancestorsheads(pfunc, *orignodes) | gca = commonancestorsheads(pfunc, *orignodes) | ||||
if len(gca) <= 1: | if len(gca) <= 1: | ||||
return gca | return gca | ||||
return deepest(gca) | return deepest(gca) | ||||
class incrementalmissingancestors(object): | class incrementalmissingancestors: | ||||
"""persistent state used to calculate missing ancestors incrementally | """persistent state used to calculate missing ancestors incrementally | ||||
Although similar in spirit to lazyancestors below, this is a separate class | Although similar in spirit to lazyancestors below, this is a separate class | ||||
because trying to support contains and missingancestors operations with the | because trying to support contains and missingancestors operations with the | ||||
same internal data structures adds needless complexity.""" | same internal data structures adds needless complexity.""" | ||||
def __init__(self, pfunc, bases): | def __init__(self, pfunc, bases): | ||||
self.bases = set(bases) | self.bases = set(bases) | ||||
see(p1) | see(p1) | ||||
else: | else: | ||||
heappop(visit) | heappop(visit) | ||||
if p2 not in seen: | if p2 not in seen: | ||||
heappush(visit, -p2) | heappush(visit, -p2) | ||||
see(p2) | see(p2) | ||||
class lazyancestors(object): | class lazyancestors: | ||||
def __init__(self, pfunc, revs, stoprev=0, inclusive=False): | def __init__(self, pfunc, revs, stoprev=0, inclusive=False): | ||||
"""Create a new object generating ancestors for the given revs. Does | """Create a new object generating ancestors for the given revs. Does | ||||
not generate revs lower than stoprev. | not generate revs lower than stoprev. | ||||
This is computed lazily starting from revs. The object supports | This is computed lazily starting from revs. The object supports | ||||
iteration and membership. | iteration and membership. | ||||
cl should be a changelog and revs should be an iterable. inclusive is | cl should be a changelog and revs should be an iterable. inclusive is |
if ctx.dirty(missing=True): | if ctx.dirty(missing=True): | ||||
dirty = b'+' | dirty = b'+' | ||||
fm.data(dirty=dirty) | fm.data(dirty=dirty) | ||||
fm.end() | fm.end() | ||||
return out.getvalue() | return out.getvalue() | ||||
class tarit(object): | class tarit: | ||||
"""write archive to tar file or stream. can write uncompressed, | """write archive to tar file or stream. can write uncompressed, | ||||
or compress with gzip or bzip2.""" | or compress with gzip or bzip2.""" | ||||
def __init__(self, dest, mtime, kind=b''): | def __init__(self, dest, mtime, kind=b''): | ||||
self.mtime = mtime | self.mtime = mtime | ||||
self.fileobj = None | self.fileobj = None | ||||
def taropen(mode, name=b'', fileobj=None): | def taropen(mode, name=b'', fileobj=None): | ||||
self.z.addfile(i, data) | self.z.addfile(i, data) | ||||
def done(self): | def done(self): | ||||
self.z.close() | self.z.close() | ||||
if self.fileobj: | if self.fileobj: | ||||
self.fileobj.close() | self.fileobj.close() | ||||
class zipit(object): | class zipit: | ||||
"""write archive to zip file or stream. can write uncompressed, | """write archive to zip file or stream. can write uncompressed, | ||||
or compressed with deflate.""" | or compressed with deflate.""" | ||||
def __init__(self, dest, mtime, compress=True): | def __init__(self, dest, mtime, compress=True): | ||||
if isinstance(dest, bytes): | if isinstance(dest, bytes): | ||||
dest = pycompat.fsdecode(dest) | dest = pycompat.fsdecode(dest) | ||||
self.z = zipfile.ZipFile( | self.z = zipfile.ZipFile( | ||||
dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED | dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED | ||||
int(self.mtime), | int(self.mtime), | ||||
) # last modification (UTC) | ) # last modification (UTC) | ||||
self.z.writestr(i, data) | self.z.writestr(i, data) | ||||
def done(self): | def done(self): | ||||
self.z.close() | self.z.close() | ||||
class fileit(object): | class fileit: | ||||
'''write archive as files in directory.''' | '''write archive as files in directory.''' | ||||
def __init__(self, name, mtime): | def __init__(self, name, mtime): | ||||
self.basedir = name | self.basedir = name | ||||
self.opener = vfsmod.vfs(self.basedir) | self.opener = vfsmod.vfs(self.basedir) | ||||
self.mtime = mtime | self.mtime = mtime | ||||
def addfile(self, name, mode, islink, data): | def addfile(self, name, mode, islink, data): |
may need to tweak this behavior further. | may need to tweak this behavior further. | ||||
""" | """ | ||||
fp, pending = txnutil.trypending( | fp, pending = txnutil.trypending( | ||||
repo.root, bookmarksvfs(repo), b'bookmarks' | repo.root, bookmarksvfs(repo), b'bookmarks' | ||||
) | ) | ||||
return fp | return fp | ||||
class bmstore(object): | class bmstore: | ||||
r"""Storage for bookmarks. | r"""Storage for bookmarks. | ||||
This object should do all bookmark-related reads and writes, so | This object should do all bookmark-related reads and writes, so | ||||
that it's fairly simple to replace the storage underlying | that it's fairly simple to replace the storage underlying | ||||
bookmarks without having to clone the logic surrounding | bookmarks without having to clone the logic surrounding | ||||
bookmarks. This type also should manage the active bookmark, if | bookmarks. This type also should manage the active bookmark, if | ||||
any. | any. | ||||
subsettable = repoviewutil.subsettable | subsettable = repoviewutil.subsettable | ||||
calcsize = struct.calcsize | calcsize = struct.calcsize | ||||
pack_into = struct.pack_into | pack_into = struct.pack_into | ||||
unpack_from = struct.unpack_from | unpack_from = struct.unpack_from | ||||
class BranchMapCache(object): | class BranchMapCache: | ||||
"""mapping of filtered views of repo with their branchcache""" | """mapping of filtered views of repo with their branchcache""" | ||||
def __init__(self): | def __init__(self): | ||||
self._per_filter = {} | self._per_filter = {} | ||||
def __getitem__(self, repo): | def __getitem__(self, repo): | ||||
self.updatecache(repo) | self.updatecache(repo) | ||||
return self._per_filter[repo.filtername] | return self._per_filter[repo.filtername] | ||||
def _branchcachedesc(repo): | def _branchcachedesc(repo): | ||||
if repo.filtername is not None: | if repo.filtername is not None: | ||||
return b'branch cache (%s)' % repo.filtername | return b'branch cache (%s)' % repo.filtername | ||||
else: | else: | ||||
return b'branch cache' | return b'branch cache' | ||||
class branchcache(object): | class branchcache: | ||||
"""A dict like object that hold branches heads cache. | """A dict like object that hold branches heads cache. | ||||
This cache is used to avoid costly computations to determine all the | This cache is used to avoid costly computations to determine all the | ||||
branch heads of a repo. | branch heads of a repo. | ||||
The cache is serialized on disk in the following format: | The cache is serialized on disk in the following format: | ||||
<tip hex node> <tip rev number> [optional filtered repo hex hash] | <tip hex node> <tip rev number> [optional filtered repo hex hash] | ||||
_rbcrecfmt = b'>4sI' | _rbcrecfmt = b'>4sI' | ||||
_rbcrecsize = calcsize(_rbcrecfmt) | _rbcrecsize = calcsize(_rbcrecfmt) | ||||
_rbcmininc = 64 * _rbcrecsize | _rbcmininc = 64 * _rbcrecsize | ||||
_rbcnodelen = 4 | _rbcnodelen = 4 | ||||
_rbcbranchidxmask = 0x7FFFFFFF | _rbcbranchidxmask = 0x7FFFFFFF | ||||
_rbccloseflag = 0x80000000 | _rbccloseflag = 0x80000000 | ||||
class revbranchcache(object): | class revbranchcache: | ||||
"""Persistent cache, mapping from revision number to branch name and close. | """Persistent cache, mapping from revision number to branch name and close. | ||||
This is a low level cache, independent of filtering. | This is a low level cache, independent of filtering. | ||||
Branch names are stored in rbc-names in internal encoding separated by 0. | Branch names are stored in rbc-names in internal encoding separated by 0. | ||||
rbc-names is append-only, and each branch name is only stored once and will | rbc-names is append-only, and each branch name is only stored once and will | ||||
thus have a unique index. | thus have a unique index. | ||||
The branch info for each revision is stored in rbc-revs as constant size | The branch info for each revision is stored in rbc-revs as constant size |
assert lparttype not in parthandlermapping | assert lparttype not in parthandlermapping | ||||
parthandlermapping[lparttype] = func | parthandlermapping[lparttype] = func | ||||
func.params = frozenset(params) | func.params = frozenset(params) | ||||
return func | return func | ||||
return _decorator | return _decorator | ||||
class unbundlerecords(object): | class unbundlerecords: | ||||
"""keep record of what happens during and unbundle | """keep record of what happens during and unbundle | ||||
New records are added using `records.add('cat', obj)`. Where 'cat' is a | New records are added using `records.add('cat', obj)`. Where 'cat' is a | ||||
category of record and obj is an arbitrary object. | category of record and obj is an arbitrary object. | ||||
`records['cat']` will return all entries of this category 'cat'. | `records['cat']` will return all entries of this category 'cat'. | ||||
Iterating on the object itself will yield `('category', obj)` tuples | Iterating on the object itself will yield `('category', obj)` tuples | ||||
return len(self._sequences) | return len(self._sequences) | ||||
def __nonzero__(self): | def __nonzero__(self): | ||||
return bool(self._sequences) | return bool(self._sequences) | ||||
__bool__ = __nonzero__ | __bool__ = __nonzero__ | ||||
class bundleoperation(object): | class bundleoperation: | ||||
"""an object that represents a single bundling process | """an object that represents a single bundling process | ||||
Its purpose is to carry unbundle-related objects and states. | Its purpose is to carry unbundle-related objects and states. | ||||
A new object should be created at the beginning of each bundle processing. | A new object should be created at the beginning of each bundle processing. | ||||
The object is to be returned by the processing function. | The object is to be returned by the processing function. | ||||
The object has very little content now it will ultimately contain: | The object has very little content now it will ultimately contain: | ||||
return processbundle(repo, unbundler, lambda: tr, source=source) | return processbundle(repo, unbundler, lambda: tr, source=source) | ||||
else: | else: | ||||
# the transactiongetter won't be used, but we might as well set it | # the transactiongetter won't be used, but we might as well set it | ||||
op = bundleoperation(repo, lambda: tr, source=source) | op = bundleoperation(repo, lambda: tr, source=source) | ||||
_processchangegroup(op, unbundler, tr, source, url, **kwargs) | _processchangegroup(op, unbundler, tr, source, url, **kwargs) | ||||
return op | return op | ||||
class partiterator(object): | class partiterator: | ||||
def __init__(self, repo, op, unbundler): | def __init__(self, repo, op, unbundler): | ||||
self.repo = repo | self.repo = repo | ||||
self.op = op | self.op = op | ||||
self.unbundler = unbundler | self.unbundler = unbundler | ||||
self.iterator = None | self.iterator = None | ||||
self.count = 0 | self.count = 0 | ||||
self.current = None | self.current = None | ||||
b"HG10BZ": (b"HG10", b'BZ'), | b"HG10BZ": (b"HG10", b'BZ'), | ||||
b"HG10GZ": (b"HG10GZ", b'GZ'), | b"HG10GZ": (b"HG10GZ", b'GZ'), | ||||
} | } | ||||
# hgweb uses this list to communicate its preferred type | # hgweb uses this list to communicate its preferred type | ||||
bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN'] | bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN'] | ||||
class bundle20(object): | class bundle20: | ||||
"""represent an outgoing bundle2 container | """represent an outgoing bundle2 container | ||||
Use the `addparam` method to add stream level parameter. and `newpart` to | Use the `addparam` method to add stream level parameter. and `newpart` to | ||||
populate it. Then call `getchunks` to retrieve all the binary chunks of | populate it. Then call `getchunks` to retrieve all the binary chunks of | ||||
data that compose the bundle2 container.""" | data that compose the bundle2 container.""" | ||||
_magicstring = b'HG20' | _magicstring = b'HG20' | ||||
server output""" | server output""" | ||||
salvaged = [] | salvaged = [] | ||||
for part in self._parts: | for part in self._parts: | ||||
if part.type.startswith(b'output'): | if part.type.startswith(b'output'): | ||||
salvaged.append(part.copy()) | salvaged.append(part.copy()) | ||||
return salvaged | return salvaged | ||||
class unpackermixin(object): | class unpackermixin: | ||||
"""A mixin to extract bytes and struct data from a stream""" | """A mixin to extract bytes and struct data from a stream""" | ||||
def __init__(self, fp): | def __init__(self, fp): | ||||
self._fp = fp | self._fp = fp | ||||
def _unpack(self, format): | def _unpack(self, format): | ||||
"""unpack this struct format from the stream | """unpack this struct format from the stream | ||||
"""read compression parameter and install payload decompression""" | """read compression parameter and install payload decompression""" | ||||
if value not in util.compengines.supportedbundletypes: | if value not in util.compengines.supportedbundletypes: | ||||
raise error.BundleUnknownFeatureError(params=(param,), values=(value,)) | raise error.BundleUnknownFeatureError(params=(param,), values=(value,)) | ||||
unbundler._compengine = util.compengines.forbundletype(value) | unbundler._compengine = util.compengines.forbundletype(value) | ||||
if value is not None: | if value is not None: | ||||
unbundler._compressed = True | unbundler._compressed = True | ||||
class bundlepart(object): | class bundlepart: | ||||
"""A bundle2 part contains application level payload | """A bundle2 part contains application level payload | ||||
The part `type` is used to route the part to the application level | The part `type` is used to route the part to the application level | ||||
handler. | handler. | ||||
The part payload is contained in ``part.data``. It could be raw bytes or a | The part payload is contained in ``part.data``. It could be raw bytes or a | ||||
generator of byte chunks. | generator of byte chunks. | ||||
finally: | finally: | ||||
if not hardabort: | if not hardabort: | ||||
part.consume() | part.consume() | ||||
self.ui.debug( | self.ui.debug( | ||||
b'bundle2-input-stream-interrupt: closing out of band context\n' | b'bundle2-input-stream-interrupt: closing out of band context\n' | ||||
) | ) | ||||
class interruptoperation(object): | class interruptoperation: | ||||
"""A limited operation to be use by part handler during interruption | """A limited operation to be use by part handler during interruption | ||||
It only have access to an ui object. | It only have access to an ui object. | ||||
""" | """ | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
self.reply = None | self.reply = None |
from .utils import stringutil | from .utils import stringutil | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
CB_MANIFEST_FILE = b'clonebundles.manifest' | CB_MANIFEST_FILE = b'clonebundles.manifest' | ||||
@attr.s | @attr.s | ||||
class bundlespec(object): | class bundlespec: | ||||
compression = attr.ib() | compression = attr.ib() | ||||
wirecompression = attr.ib() | wirecompression = attr.ib() | ||||
version = attr.ib() | version = attr.ib() | ||||
wireversion = attr.ib() | wireversion = attr.ib() | ||||
params = attr.ib() | params = attr.ib() | ||||
contentopts = attr.ib() | contentopts = attr.ib() | ||||
) | ) | ||||
continue | continue | ||||
newentries.append(entry) | newentries.append(entry) | ||||
return newentries | return newentries | ||||
class clonebundleentry(object): | class clonebundleentry: | ||||
"""Represents an item in a clone bundles manifest. | """Represents an item in a clone bundles manifest. | ||||
This rich class is needed to support sorting since sorted() in Python 3 | This rich class is needed to support sorting since sorted() in Python 3 | ||||
doesn't support ``cmp`` and our comparison is complex enough that ``key=`` | doesn't support ``cmp`` and our comparison is complex enough that ``key=`` | ||||
won't work. | won't work. | ||||
""" | """ | ||||
def __init__(self, value, prefers): | def __init__(self, value, prefers): |
for chunkdata in iter(cgunpacker.filelogheader, {}): | for chunkdata in iter(cgunpacker.filelogheader, {}): | ||||
fname = chunkdata[b'filename'] | fname = chunkdata[b'filename'] | ||||
filespos[fname] = cgunpacker.tell() | filespos[fname] = cgunpacker.tell() | ||||
for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): | for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): | ||||
pass | pass | ||||
return filespos | return filespos | ||||
class bundlerepository(object): | class bundlerepository: | ||||
"""A repository instance that is a union of a local repo and a bundle. | """A repository instance that is a union of a local repo and a bundle. | ||||
Instances represent a read-only repository composed of a local repository | Instances represent a read-only repository composed of a local repository | ||||
with the contents of a bundle file applied. The repository instance is | with the contents of a bundle file applied. The repository instance is | ||||
conceptually similar to the state of a repository after an | conceptually similar to the state of a repository after an | ||||
``hg unbundle`` operation. However, the contents of the bundle are never | ``hg unbundle`` operation. However, the contents of the bundle are never | ||||
applied to the actual base repository. | applied to the actual base repository. | ||||
pass | pass | ||||
repo.__class__ = derivedbundlerepository | repo.__class__ = derivedbundlerepository | ||||
bundlerepository.__init__(repo, bundlepath, url, tempparent) | bundlerepository.__init__(repo, bundlepath, url, tempparent) | ||||
return repo | return repo | ||||
class bundletransactionmanager(object): | class bundletransactionmanager: | ||||
def transaction(self): | def transaction(self): | ||||
return None | return None | ||||
def close(self): | def close(self): | ||||
raise NotImplementedError | raise NotImplementedError | ||||
def release(self): | def release(self): | ||||
raise NotImplementedError | raise NotImplementedError |
attrkinds[lib.VREG] = statmod.S_IFREG | attrkinds[lib.VREG] = statmod.S_IFREG | ||||
attrkinds[lib.VDIR] = statmod.S_IFDIR | attrkinds[lib.VDIR] = statmod.S_IFDIR | ||||
attrkinds[lib.VLNK] = statmod.S_IFLNK | attrkinds[lib.VLNK] = statmod.S_IFLNK | ||||
attrkinds[lib.VBLK] = statmod.S_IFBLK | attrkinds[lib.VBLK] = statmod.S_IFBLK | ||||
attrkinds[lib.VCHR] = statmod.S_IFCHR | attrkinds[lib.VCHR] = statmod.S_IFCHR | ||||
attrkinds[lib.VFIFO] = statmod.S_IFIFO | attrkinds[lib.VFIFO] = statmod.S_IFIFO | ||||
attrkinds[lib.VSOCK] = statmod.S_IFSOCK | attrkinds[lib.VSOCK] = statmod.S_IFSOCK | ||||
class stat_res(object): | class stat_res: | ||||
def __init__(self, st_mode, st_mtime, st_size): | def __init__(self, st_mode, st_mtime, st_size): | ||||
self.st_mode = st_mode | self.st_mode = st_mode | ||||
self.st_mtime = st_mtime | self.st_mtime = st_mtime | ||||
self.st_size = st_size | self.st_size = st_size | ||||
tv_sec_ofs = ffi.offsetof(b"struct timespec", b"tv_sec") | tv_sec_ofs = ffi.offsetof(b"struct timespec", b"tv_sec") | ||||
buf = ffi.new(b"char[]", listdir_batch_size) | buf = ffi.new(b"char[]", listdir_batch_size) | ||||
fh.close() | fh.close() | ||||
if cleanup is not None: | if cleanup is not None: | ||||
if filename and vfs: | if filename and vfs: | ||||
vfs.unlink(cleanup) | vfs.unlink(cleanup) | ||||
else: | else: | ||||
os.unlink(cleanup) | os.unlink(cleanup) | ||||
class cg1unpacker(object): | class cg1unpacker: | ||||
"""Unpacker for cg1 changegroup streams. | """Unpacker for cg1 changegroup streams. | ||||
A changegroup unpacker handles the framing of the revision data in | A changegroup unpacker handles the framing of the revision data in | ||||
the wire format. Most consumers will want to use the apply() | the wire format. Most consumers will want to use the apply() | ||||
method to add the changes from the changegroup to a repository. | method to add the changes from the changegroup to a repository. | ||||
If you're forwarding a changegroup unmodified to another consumer, | If you're forwarding a changegroup unmodified to another consumer, | ||||
use getchunks(), which returns an iterator of changegroup | use getchunks(), which returns an iterator of changegroup | ||||
deltabase, | deltabase, | ||||
delta, | delta, | ||||
flags, | flags, | ||||
sidedata, | sidedata, | ||||
protocol_flags, | protocol_flags, | ||||
) | ) | ||||
class headerlessfixup(object): | class headerlessfixup: | ||||
def __init__(self, fh, h): | def __init__(self, fh, h): | ||||
self._h = h | self._h = h | ||||
self._fh = fh | self._fh = fh | ||||
def read(self, n): | def read(self, n): | ||||
if self._h: | if self._h: | ||||
d, self._h = self._h[:n], self._h[n:] | d, self._h = self._h[:n], self._h[n:] | ||||
if len(d) < n: | if len(d) < n: | ||||
revision.linknode = linknode | revision.linknode = linknode | ||||
yield revision | yield revision | ||||
if progress: | if progress: | ||||
progress.complete() | progress.complete() | ||||
class cgpacker(object): | class cgpacker: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
repo, | repo, | ||||
oldmatcher, | oldmatcher, | ||||
matcher, | matcher, | ||||
version, | version, | ||||
builddeltaheader, | builddeltaheader, | ||||
manifestsend, | manifestsend, |
return b"\0".join(items) | return b"\0".join(items) | ||||
def stripdesc(desc): | def stripdesc(desc): | ||||
"""strip trailing whitespace and leading and trailing empty lines""" | """strip trailing whitespace and leading and trailing empty lines""" | ||||
return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n') | return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n') | ||||
class appender(object): | class appender: | ||||
"""the changelog index must be updated last on disk, so we use this class | """the changelog index must be updated last on disk, so we use this class | ||||
to delay writes to it""" | to delay writes to it""" | ||||
def __init__(self, vfs, name, mode, buf): | def __init__(self, vfs, name, mode, buf): | ||||
self.data = buf | self.data = buf | ||||
fp = vfs(name, mode) | fp = vfs(name, mode) | ||||
self.fp = fp | self.fp = fp | ||||
self.offset = fp.tell() | self.offset = fp.tell() | ||||
def __enter__(self): | def __enter__(self): | ||||
self.fp.__enter__() | self.fp.__enter__() | ||||
return self | return self | ||||
def __exit__(self, *args): | def __exit__(self, *args): | ||||
return self.fp.__exit__(*args) | return self.fp.__exit__(*args) | ||||
class _divertopener(object): | class _divertopener: | ||||
def __init__(self, opener, target): | def __init__(self, opener, target): | ||||
self._opener = opener | self._opener = opener | ||||
self._target = target | self._target = target | ||||
def __call__(self, name, mode=b'r', checkambig=False, **kwargs): | def __call__(self, name, mode=b'r', checkambig=False, **kwargs): | ||||
if name != self._target: | if name != self._target: | ||||
return self._opener(name, mode, **kwargs) | return self._opener(name, mode, **kwargs) | ||||
return self._opener(name + b".a", mode, **kwargs) | return self._opener(name + b".a", mode, **kwargs) | ||||
return opener(name, mode, **kwargs) | return opener(name, mode, **kwargs) | ||||
assert not kwargs | assert not kwargs | ||||
return appender(opener, name, mode, buf) | return appender(opener, name, mode, buf) | ||||
return _delay | return _delay | ||||
@attr.s | @attr.s | ||||
class _changelogrevision(object): | class _changelogrevision: | ||||
# Extensions might modify _defaultextra, so let the constructor below pass | # Extensions might modify _defaultextra, so let the constructor below pass | ||||
# it in | # it in | ||||
extra = attr.ib() | extra = attr.ib() | ||||
manifest = attr.ib() | manifest = attr.ib() | ||||
user = attr.ib(default=b'') | user = attr.ib(default=b'') | ||||
date = attr.ib(default=(0, 0)) | date = attr.ib(default=(0, 0)) | ||||
files = attr.ib(default=attr.Factory(list)) | files = attr.ib(default=attr.Factory(list)) | ||||
filesadded = attr.ib(default=None) | filesadded = attr.ib(default=None) | ||||
filesremoved = attr.ib(default=None) | filesremoved = attr.ib(default=None) | ||||
p1copies = attr.ib(default=None) | p1copies = attr.ib(default=None) | ||||
p2copies = attr.ib(default=None) | p2copies = attr.ib(default=None) | ||||
description = attr.ib(default=b'') | description = attr.ib(default=b'') | ||||
branchinfo = attr.ib(default=(_defaultextra[b'branch'], False)) | branchinfo = attr.ib(default=(_defaultextra[b'branch'], False)) | ||||
class changelogrevision(object): | class changelogrevision: | ||||
"""Holds results of a parsed changelog revision. | """Holds results of a parsed changelog revision. | ||||
Changelog revisions consist of multiple pieces of data, including | Changelog revisions consist of multiple pieces of data, including | ||||
the manifest node, user, and date. This object exposes a view into | the manifest node, user, and date. This object exposes a view into | ||||
the parsed object. | the parsed object. | ||||
""" | """ | ||||
__slots__ = ( | __slots__ = ( |
return (st[stat.ST_MTIME], st.st_size) | return (st[stat.ST_MTIME], st.st_size) | ||||
except OSError: | except OSError: | ||||
# could be ENOENT, EPERM etc. not fatal in any case | # could be ENOENT, EPERM etc. not fatal in any case | ||||
pass | pass | ||||
return _hashlist(pycompat.maplist(trystat, paths))[:12] | return _hashlist(pycompat.maplist(trystat, paths))[:12] | ||||
class hashstate(object): | class hashstate: | ||||
"""a structure storing confighash, mtimehash, paths used for mtimehash""" | """a structure storing confighash, mtimehash, paths used for mtimehash""" | ||||
def __init__(self, confighash, mtimehash, mtimepaths): | def __init__(self, confighash, mtimehash, mtimepaths): | ||||
self.confighash = confighash | self.confighash = confighash | ||||
self.mtimehash = mtimehash | self.mtimehash = mtimehash | ||||
self.mtimepaths = mtimepaths | self.mtimepaths = mtimepaths | ||||
@staticmethod | @staticmethod | ||||
commandserver.setuplogging(newui, fp=cdebug) | commandserver.setuplogging(newui, fp=cdebug) | ||||
if newui is not newlui: | if newui is not newlui: | ||||
extensions.populateui(newlui) | extensions.populateui(newlui) | ||||
commandserver.setuplogging(newlui, fp=cdebug) | commandserver.setuplogging(newlui, fp=cdebug) | ||||
return (newui, newlui) | return (newui, newlui) | ||||
class channeledsystem(object): | class channeledsystem: | ||||
"""Propagate ui.system() request in the following format: | """Propagate ui.system() request in the following format: | ||||
payload length (unsigned int), | payload length (unsigned int), | ||||
type, '\0', | type, '\0', | ||||
cmd, '\0', | cmd, '\0', | ||||
cwd, '\0', | cwd, '\0', | ||||
envkey, '=', val, '\0', | envkey, '=', val, '\0', | ||||
... | ... | ||||
# if the basename of address contains '.', use only the left part. this | # if the basename of address contains '.', use only the left part. this | ||||
# makes it possible for the client to pass 'server.tmp$PID' and follow by | # makes it possible for the client to pass 'server.tmp$PID' and follow by | ||||
# an atomic rename to avoid locking when spawning new servers. | # an atomic rename to avoid locking when spawning new servers. | ||||
dirname, basename = os.path.split(address) | dirname, basename = os.path.split(address) | ||||
basename = basename.split(b'.', 1)[0] | basename = basename.split(b'.', 1)[0] | ||||
return b'%s-%s' % (os.path.join(dirname, basename), hashstr) | return b'%s-%s' % (os.path.join(dirname, basename), hashstr) | ||||
class chgunixservicehandler(object): | class chgunixservicehandler: | ||||
"""Set of operations for chg services""" | """Set of operations for chg services""" | ||||
pollinterval = 1 # [sec] | pollinterval = 1 # [sec] | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
# TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level) | # TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level) |
def recordinwlock(ui, repo, message, match, opts): | def recordinwlock(ui, repo, message, match, opts): | ||||
with repo.wlock(): | with repo.wlock(): | ||||
return recordfunc(ui, repo, message, match, opts) | return recordfunc(ui, repo, message, match, opts) | ||||
return commit(ui, repo, recordinwlock, pats, opts) | return commit(ui, repo, recordinwlock, pats, opts) | ||||
class dirnode(object): | class dirnode: | ||||
""" | """ | ||||
Represent a directory in user working copy with information required for | Represent a directory in user working copy with information required for | ||||
the purpose of tersing its status. | the purpose of tersing its status. | ||||
path is the path to the directory, without a trailing '/' | path is the path to the directory, without a trailing '/' | ||||
statuses is a set of statuses of all files in this directory (this includes | statuses is a set of statuses of all files in this directory (this includes | ||||
all the files in all the subdirectories too) | all the files in all the subdirectories too) | ||||
def _commentlines(raw): | def _commentlines(raw): | ||||
'''Surround lineswith a comment char and a new line''' | '''Surround lineswith a comment char and a new line''' | ||||
lines = raw.splitlines() | lines = raw.splitlines() | ||||
commentedlines = [b'# %s' % line for line in lines] | commentedlines = [b'# %s' % line for line in lines] | ||||
return b'\n'.join(commentedlines) + b'\n' | return b'\n'.join(commentedlines) + b'\n' | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class morestatus(object): | class morestatus: | ||||
reporoot = attr.ib() | reporoot = attr.ib() | ||||
unfinishedop = attr.ib() | unfinishedop = attr.ib() | ||||
unfinishedmsg = attr.ib() | unfinishedmsg = attr.ib() | ||||
activemerge = attr.ib() | activemerge = attr.ib() | ||||
unresolvedpaths = attr.ib() | unresolvedpaths = attr.ib() | ||||
_formattedpaths = attr.ib(init=False, default=set()) | _formattedpaths = attr.ib(init=False, default=set()) | ||||
_label = b'status.morestatus' | _label = b'status.morestatus' | ||||
return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props)) | return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props)) | ||||
def isstdiofilename(pat): | def isstdiofilename(pat): | ||||
"""True if the given pat looks like a filename denoting stdin/stdout""" | """True if the given pat looks like a filename denoting stdin/stdout""" | ||||
return not pat or pat == b'-' | return not pat or pat == b'-' | ||||
class _unclosablefile(object): | class _unclosablefile: | ||||
def __init__(self, fp): | def __init__(self, fp): | ||||
self._fp = fp | self._fp = fp | ||||
def close(self): | def close(self): | ||||
pass | pass | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(self._fp) | return iter(self._fp) |
vfs as vfsmod, | vfs as vfsmod, | ||||
) | ) | ||||
from .utils import ( | from .utils import ( | ||||
cborutil, | cborutil, | ||||
procutil, | procutil, | ||||
) | ) | ||||
class channeledoutput(object): | class channeledoutput: | ||||
""" | """ | ||||
Write data to out in the following format: | Write data to out in the following format: | ||||
data length (unsigned int), | data length (unsigned int), | ||||
data | data | ||||
""" | """ | ||||
def __init__(self, out, channel): | def __init__(self, out, channel): | ||||
self.out.flush() | self.out.flush() | ||||
def __getattr__(self, attr): | def __getattr__(self, attr): | ||||
if attr in ('isatty', 'fileno', 'tell', 'seek'): | if attr in ('isatty', 'fileno', 'tell', 'seek'): | ||||
raise AttributeError(attr) | raise AttributeError(attr) | ||||
return getattr(self.out, attr) | return getattr(self.out, attr) | ||||
class channeledmessage(object): | class channeledmessage: | ||||
""" | """ | ||||
Write encoded message and metadata to out in the following format: | Write encoded message and metadata to out in the following format: | ||||
data length (unsigned int), | data length (unsigned int), | ||||
encoded message and metadata, as a flat key-value dict. | encoded message and metadata, as a flat key-value dict. | ||||
Each message should have 'type' attribute. Messages of unknown type | Each message should have 'type' attribute. Messages of unknown type | ||||
should be ignored. | should be ignored. | ||||
if data is not None: | if data is not None: | ||||
opts[b'data'] = data | opts[b'data'] = data | ||||
self._cout.write(self._encodefn(opts)) | self._cout.write(self._encodefn(opts)) | ||||
def __getattr__(self, attr): | def __getattr__(self, attr): | ||||
return getattr(self._cout, attr) | return getattr(self._cout, attr) | ||||
class channeledinput(object): | class channeledinput: | ||||
""" | """ | ||||
Read data from in_. | Read data from in_. | ||||
Requests for input are written to out in the following format: | Requests for input are written to out in the following format: | ||||
channel identifier - 'I' for plain input, 'L' line based (1 byte) | channel identifier - 'I' for plain input, 'L' line based (1 byte) | ||||
how many bytes to send at most (unsigned int), | how many bytes to send at most (unsigned int), | ||||
The client replies with: | The client replies with: | ||||
f = _messageencoders.get(n) | f = _messageencoders.get(n) | ||||
if f: | if f: | ||||
return n, f | return n, f | ||||
raise error.Abort( | raise error.Abort( | ||||
b'no supported message encodings: %s' % b' '.join(encnames) | b'no supported message encodings: %s' % b' '.join(encnames) | ||||
) | ) | ||||
class server(object): | class server: | ||||
""" | """ | ||||
Listens for commands on fin, runs them and writes the output on a channel | Listens for commands on fin, runs them and writes the output on a channel | ||||
based stream to fout. | based stream to fout. | ||||
""" | """ | ||||
def __init__(self, ui, repo, fin, fout, prereposetups=None): | def __init__(self, ui, repo, fin, fout, prereposetups=None): | ||||
self.cwd = encoding.getcwd() | self.cwd = encoding.getcwd() | ||||
targetuis = {ui} | targetuis = {ui} | ||||
if repo: | if repo: | ||||
targetuis.add(repo.baseui) | targetuis.add(repo.baseui) | ||||
targetuis.add(repo.ui) | targetuis.add(repo.ui) | ||||
for u in targetuis: | for u in targetuis: | ||||
u.setlogger(b'cmdserver', logger) | u.setlogger(b'cmdserver', logger) | ||||
class pipeservice(object): | class pipeservice: | ||||
def __init__(self, ui, repo, opts): | def __init__(self, ui, repo, opts): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
def init(self): | def init(self): | ||||
pass | pass | ||||
def run(self): | def run(self): | ||||
fin.close() | fin.close() | ||||
try: | try: | ||||
fout.close() # implicit flush() may cause another EPIPE | fout.close() # implicit flush() may cause another EPIPE | ||||
except IOError as inst: | except IOError as inst: | ||||
if inst.errno != errno.EPIPE: | if inst.errno != errno.EPIPE: | ||||
raise | raise | ||||
class unixservicehandler(object): | class unixservicehandler: | ||||
"""Set of pluggable operations for unix-mode services | """Set of pluggable operations for unix-mode services | ||||
Almost all methods except for createcmdserver() are called in the main | Almost all methods except for createcmdserver() are called in the main | ||||
process. You can't pass mutable resource back from createcmdserver(). | process. You can't pass mutable resource back from createcmdserver(). | ||||
""" | """ | ||||
pollinterval = None | pollinterval = None | ||||
"""Called when main process notices new connection""" | """Called when main process notices new connection""" | ||||
def createcmdserver(self, repo, conn, fin, fout, prereposetups): | def createcmdserver(self, repo, conn, fin, fout, prereposetups): | ||||
"""Create new command server instance; called in the process that | """Create new command server instance; called in the process that | ||||
serves for the current connection""" | serves for the current connection""" | ||||
return server(self.ui, repo, fin, fout, prereposetups) | return server(self.ui, repo, fin, fout, prereposetups) | ||||
class unixforkingservice(object): | class unixforkingservice: | ||||
""" | """ | ||||
Listens on unix domain socket and forks server per connection | Listens on unix domain socket and forks server per connection | ||||
""" | """ | ||||
def __init__(self, ui, repo, opts, handler=None): | def __init__(self, ui, repo, opts, handler=None): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
self.address = opts[b'address'] | self.address = opts[b'address'] |
from .pycompat import getattr | from .pycompat import getattr | ||||
from . import ( | from . import ( | ||||
encoding, | encoding, | ||||
error, | error, | ||||
util, | util, | ||||
) | ) | ||||
class config(object): | class config: | ||||
def __init__(self, data=None): | def __init__(self, data=None): | ||||
self._current_source_level = 0 | self._current_source_level = 0 | ||||
self._data = {} | self._data = {} | ||||
self._unset = [] | self._unset = [] | ||||
if data: | if data: | ||||
for k in data._data: | for k in data._data: | ||||
self._data[k] = data[k].copy() | self._data[k] = data[k].copy() | ||||
self._current_source_level = data._current_source_level + 1 | self._current_source_level = data._current_source_level + 1 |
for key in sorted(knownkeys & newkeys): | for key in sorted(knownkeys & newkeys): | ||||
msg = b"extension '%s' overwrite config item '%s.%s'" | msg = b"extension '%s' overwrite config item '%s.%s'" | ||||
msg %= (extname, section, key) | msg %= (extname, section, key) | ||||
ui.develwarn(msg, config=b'warn-config') | ui.develwarn(msg, config=b'warn-config') | ||||
knownitems.update(items) | knownitems.update(items) | ||||
class configitem(object): | class configitem: | ||||
"""represent a known config item | """represent a known config item | ||||
:section: the official config section where to find this item, | :section: the official config section where to find this item, | ||||
:name: the official name within the section, | :name: the official name within the section, | ||||
:default: default value for this item, | :default: default value for this item, | ||||
:alias: optional list of tuples as alternatives, | :alias: optional list of tuples as alternatives, | ||||
:generic: this is a generic definition, match name using regular expression. | :generic: this is a generic definition, match name using regular expression. | ||||
""" | """ |
) | ) | ||||
from .dirstateutils import ( | from .dirstateutils import ( | ||||
timestamp, | timestamp, | ||||
) | ) | ||||
propertycache = util.propertycache | propertycache = util.propertycache | ||||
class basectx(object): | class basectx: | ||||
"""A basectx object represents the common logic for its children: | """A basectx object represents the common logic for its children: | ||||
changectx: read-only context that is already present in the repo, | changectx: read-only context that is already present in the repo, | ||||
workingctx: a context that represents the working directory and can | workingctx: a context that represents the working directory and can | ||||
be committed, | be committed, | ||||
memctx: a context that represents changes in-memory and can also | memctx: a context that represents changes in-memory and can also | ||||
be committed.""" | be committed.""" | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
m = matchmod.badmatch(self._repo.narrowmatch(match), bad) | m = matchmod.badmatch(self._repo.narrowmatch(match), bad) | ||||
return self._manifest.walk(m) | return self._manifest.walk(m) | ||||
def matches(self, match): | def matches(self, match): | ||||
return self.walk(match) | return self.walk(match) | ||||
class basefilectx(object): | class basefilectx: | ||||
"""A filecontext object represents the common logic for its children: | """A filecontext object represents the common logic for its children: | ||||
filectx: read-only access to a filerevision that is already present | filectx: read-only access to a filerevision that is already present | ||||
in the repo, | in the repo, | ||||
workingfilectx: a filecontext that represents files from the working | workingfilectx: a filecontext that represents files from the working | ||||
directory, | directory, | ||||
memfilectx: a filecontext that represents files in-memory, | memfilectx: a filecontext that represents files in-memory, | ||||
""" | """ | ||||
elif f in self: | elif f in self: | ||||
modified.append(f) | modified.append(f) | ||||
else: | else: | ||||
removed.append(f) | removed.append(f) | ||||
return scmutil.status(modified, added, removed, [], [], [], []) | return scmutil.status(modified, added, removed, [], [], [], []) | ||||
class arbitraryfilectx(object): | class arbitraryfilectx: | ||||
"""Allows you to use filectx-like functions on a file in an arbitrary | """Allows you to use filectx-like functions on a file in an arbitrary | ||||
location on disk, possibly not in the working directory. | location on disk, possibly not in the working directory. | ||||
""" | """ | ||||
def __init__(self, path, repo=None): | def __init__(self, path, repo=None): | ||||
# Repo is optional because contrib/simplemerge uses this class. | # Repo is optional because contrib/simplemerge uses this class. | ||||
self._repo = repo | self._repo = repo | ||||
self._path = path | self._path = path |
elif mb[src] != m2[src] and not _related(c2[src], base[src]): | elif mb[src] != m2[src] and not _related(c2[src], base[src]): | ||||
return | return | ||||
elif mb[src] != m2[src] or mb.flags(src) != m2.flags(src): | elif mb[src] != m2[src] or mb.flags(src) != m2.flags(src): | ||||
# modified on side 2 | # modified on side 2 | ||||
for dst in dsts1: | for dst in dsts1: | ||||
copy[dst] = src | copy[dst] = src | ||||
class branch_copies(object): | class branch_copies: | ||||
"""Information about copies made on one side of a merge/graft. | """Information about copies made on one side of a merge/graft. | ||||
"copy" is a mapping from destination name -> source name, | "copy" is a mapping from destination name -> source name, | ||||
where source is in c1 and destination is in c2 or vice-versa. | where source is in c1 and destination is in c2 or vice-versa. | ||||
"movewithdir" is a mapping from source name -> destination name, | "movewithdir" is a mapping from source name -> destination name, | ||||
where the file at source present in one context but not the other | where the file at source present in one context but not the other | ||||
needs to be moved to destination by the merge process, because the | needs to be moved to destination by the merge process, because the |
"""Return True if the user wants to use curses | """Return True if the user wants to use curses | ||||
This method returns True if curses is found (and that python is built with | This method returns True if curses is found (and that python is built with | ||||
it) and that the user has the correct flag for the ui. | it) and that the user has the correct flag for the ui. | ||||
""" | """ | ||||
return curses and ui.interface(b"chunkselector") == b"curses" | return curses and ui.interface(b"chunkselector") == b"curses" | ||||
class patchnode(object): | class patchnode: | ||||
"""abstract class for patch graph nodes | """abstract class for patch graph nodes | ||||
(i.e. patchroot, header, hunk, hunkline) | (i.e. patchroot, header, hunk, hunkline) | ||||
""" | """ | ||||
def firstchild(self): | def firstchild(self): | ||||
raise NotImplementedError(b"method must be implemented by subclass") | raise NotImplementedError(b"method must be implemented by subclass") | ||||
def lastchild(self): | def lastchild(self): | ||||
def testchunkselector(testfn, ui, headerlist, operation=None): | def testchunkselector(testfn, ui, headerlist, operation=None): | ||||
""" | """ | ||||
test interface to get selection of chunks, and mark the applied flags | test interface to get selection of chunks, and mark the applied flags | ||||
of the chosen chunks. | of the chosen chunks. | ||||
""" | """ | ||||
chunkselector = curseschunkselector(headerlist, ui, operation) | chunkselector = curseschunkselector(headerlist, ui, operation) | ||||
class dummystdscr(object): | class dummystdscr: | ||||
def clear(self): | def clear(self): | ||||
pass | pass | ||||
def refresh(self): | def refresh(self): | ||||
pass | pass | ||||
chunkselector.stdscr = dummystdscr() | chunkselector.stdscr = dummystdscr() | ||||
if testfn and os.path.exists(testfn): | if testfn and os.path.exists(testfn): | ||||
_headermessages = { # {operation: text} | _headermessages = { # {operation: text} | ||||
b'apply': _(b'Select hunks to apply'), | b'apply': _(b'Select hunks to apply'), | ||||
b'discard': _(b'Select hunks to discard'), | b'discard': _(b'Select hunks to discard'), | ||||
b'keep': _(b'Select hunks to keep'), | b'keep': _(b'Select hunks to keep'), | ||||
None: _(b'Select hunks to record'), | None: _(b'Select hunks to record'), | ||||
} | } | ||||
class curseschunkselector(object): | class curseschunkselector: | ||||
def __init__(self, headerlist, ui, operation=None): | def __init__(self, headerlist, ui, operation=None): | ||||
# put the headers into a patch object | # put the headers into a patch object | ||||
self.headerlist = patch(headerlist) | self.headerlist = patch(headerlist) | ||||
self.ui = ui | self.ui = ui | ||||
self.opts = {} | self.opts = {} | ||||
self.errorstr = None | self.errorstr = None |
for rev in revsfn(start=first + 1): | for rev in revsfn(start=first + 1): | ||||
for prev in parentrevsfn(rev): | for prev in parentrevsfn(rev): | ||||
if prev != nullrev and prev in seen: | if prev != nullrev and prev in seen: | ||||
seen.add(rev) | seen.add(rev) | ||||
yield rev | yield rev | ||||
break | break | ||||
class subsetparentswalker(object): | class subsetparentswalker: | ||||
r"""Scan adjacent ancestors in the graph given by the subset | r"""Scan adjacent ancestors in the graph given by the subset | ||||
This computes parent-child relations in the sub graph filtered by | This computes parent-child relations in the sub graph filtered by | ||||
a revset. Primary use case is to draw a revisions graph. | a revset. Primary use case is to draw a revisions graph. | ||||
In the following example, we consider that the node 'f' has edges to all | In the following example, we consider that the node 'f' has edges to all | ||||
ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b' | ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b' | ||||
is eliminated because there is a path 'f'->'c'->'b' for example. | is eliminated because there is a path 'f'->'c'->'b' for example. | ||||
lbs, ubs = zip(linerange1, seen[i][1]) | lbs, ubs = zip(linerange1, seen[i][1]) | ||||
linerange1 = min(lbs), max(ubs) | linerange1 = min(lbs), max(ubs) | ||||
seen[i] = c, linerange1 | seen[i] = c, linerange1 | ||||
if inrange: | if inrange: | ||||
yield c, linerange1 | yield c, linerange1 | ||||
@attr.s(slots=True, frozen=True) | @attr.s(slots=True, frozen=True) | ||||
class annotateline(object): | class annotateline: | ||||
fctx = attr.ib() | fctx = attr.ib() | ||||
lineno = attr.ib() | lineno = attr.ib() | ||||
# Whether this annotation was the result of a skip-annotate. | # Whether this annotation was the result of a skip-annotate. | ||||
skip = attr.ib(default=False) | skip = attr.ib(default=False) | ||||
text = attr.ib(default=None) | text = attr.ib(default=None) | ||||
@attr.s(slots=True, frozen=True) | @attr.s(slots=True, frozen=True) | ||||
class _annotatedfile(object): | class _annotatedfile: | ||||
# list indexed by lineno - 1 | # list indexed by lineno - 1 | ||||
fctxs = attr.ib() | fctxs = attr.ib() | ||||
linenos = attr.ib() | linenos = attr.ib() | ||||
skips = attr.ib() | skips = attr.ib() | ||||
# full file content | # full file content | ||||
text = attr.ib() | text = attr.ib() | ||||
msg %= func.__name__ | msg %= func.__name__ | ||||
raise error.ProgrammingError(msg) | raise error.ProgrammingError(msg) | ||||
return func(self, *args, **kwargs) | return func(self, *args, **kwargs) | ||||
return wrap | return wrap | ||||
@interfaceutil.implementer(intdirstate.idirstate) | @interfaceutil.implementer(intdirstate.idirstate) | ||||
class dirstate(object): | class dirstate: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
opener, | opener, | ||||
ui, | ui, | ||||
root, | root, | ||||
validate, | validate, | ||||
sparsematchfn, | sparsematchfn, | ||||
nodeconstants, | nodeconstants, |
if rustmod is None: | if rustmod is None: | ||||
DirstateItem = parsers.DirstateItem | DirstateItem = parsers.DirstateItem | ||||
else: | else: | ||||
DirstateItem = rustmod.DirstateItem | DirstateItem = rustmod.DirstateItem | ||||
rangemask = 0x7FFFFFFF | rangemask = 0x7FFFFFFF | ||||
class _dirstatemapcommon(object): | class _dirstatemapcommon: | ||||
""" | """ | ||||
Methods that are identical for both implementations of the dirstatemap | Methods that are identical for both implementations of the dirstatemap | ||||
class, with and without Rust extensions enabled. | class, with and without Rust extensions enabled. | ||||
""" | """ | ||||
# please pytype | # please pytype | ||||
_map = None | _map = None |
# | # | ||||
# Node IDs are null-padded if shorter than 32 bytes. | # Node IDs are null-padded if shorter than 32 bytes. | ||||
# A data file shorter than the specified used size is corrupted (truncated) | # A data file shorter than the specified used size is corrupted (truncated) | ||||
HEADER = struct.Struct( | HEADER = struct.Struct( | ||||
">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), v2.TREE_METADATA_SIZE) | ">{}s32s32s{}sLB".format(len(V2_FORMAT_MARKER), v2.TREE_METADATA_SIZE) | ||||
) | ) | ||||
class DirstateDocket(object): | class DirstateDocket: | ||||
data_filename_pattern = b'dirstate.%s' | data_filename_pattern = b'dirstate.%s' | ||||
def __init__(self, parents, data_size, tree_metadata, uuid): | def __init__(self, parents, data_size, tree_metadata, uuid): | ||||
self.parents = parents | self.parents = parents | ||||
self.data_size = data_size | self.data_size = data_size | ||||
self.tree_metadata = tree_metadata | self.tree_metadata = tree_metadata | ||||
self.uuid = uuid | self.uuid = uuid | ||||
) | ) | ||||
def slice_with_len(data, start, len): | def slice_with_len(data, start, len): | ||||
return data[start : start + len] | return data[start : start + len] | ||||
@attr.s | @attr.s | ||||
class Node(object): | class Node: | ||||
path = attr.ib() | path = attr.ib() | ||||
entry = attr.ib() | entry = attr.ib() | ||||
parent = attr.ib(default=None) | parent = attr.ib(default=None) | ||||
children_count = attr.ib(default=0) | children_count = attr.ib(default=0) | ||||
children_offset = attr.ib(default=0) | children_offset = attr.ib(default=0) | ||||
descendants_with_entry = attr.ib(default=0) | descendants_with_entry = attr.ib(default=0) | ||||
tracked_descendants = attr.ib(default=0) | tracked_descendants = attr.ib(default=0) | ||||
common, anyinc, srvheads = res | common, anyinc, srvheads = res | ||||
if heads and not anyinc: | if heads and not anyinc: | ||||
# server could be lying on the advertised heads | # server could be lying on the advertised heads | ||||
has_node = repo.changelog.hasnode | has_node = repo.changelog.hasnode | ||||
anyinc = any(not has_node(n) for n in heads) | anyinc = any(not has_node(n) for n in heads) | ||||
return (list(common), anyinc, heads or list(srvheads)) | return (list(common), anyinc, heads or list(srvheads)) | ||||
class outgoing(object): | class outgoing: | ||||
"""Represents the result of a findcommonoutgoing() call. | """Represents the result of a findcommonoutgoing() call. | ||||
Members: | Members: | ||||
ancestorsof is a list of the nodes whose ancestors are included in the | ancestorsof is a list of the nodes whose ancestors are included in the | ||||
outgoing operation. | outgoing operation. | ||||
missing is a list of those ancestors of ancestorsof that are present in | missing is a list of those ancestors of ancestorsof that are present in |
from .utils import ( | from .utils import ( | ||||
procutil, | procutil, | ||||
stringutil, | stringutil, | ||||
urlutil, | urlutil, | ||||
) | ) | ||||
class request(object): | class request: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
args, | args, | ||||
ui=None, | ui=None, | ||||
repo=None, | repo=None, | ||||
fin=None, | fin=None, | ||||
fout=None, | fout=None, | ||||
ferr=None, | ferr=None, | ||||
# tokenize each argument into exactly one word. | # tokenize each argument into exactly one word. | ||||
replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args) | replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args) | ||||
# escape '\$' for regex | # escape '\$' for regex | ||||
regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$') | regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$') | ||||
r = re.compile(regex) | r = re.compile(regex) | ||||
return r.sub(lambda x: replacemap[x.group()], cmd) | return r.sub(lambda x: replacemap[x.group()], cmd) | ||||
class cmdalias(object): | class cmdalias: | ||||
def __init__(self, ui, name, definition, cmdtable, source): | def __init__(self, ui, name, definition, cmdtable, source): | ||||
self.name = self.cmd = name | self.name = self.cmd = name | ||||
self.cmdname = b'' | self.cmdname = b'' | ||||
self.definition = definition | self.definition = definition | ||||
self.fn = None | self.fn = None | ||||
self.givenargs = [] | self.givenargs = [] | ||||
self.opts = [] | self.opts = [] | ||||
self.help = b'' | self.help = b'' | ||||
try: | try: | ||||
return util.checksignature(self.fn)(ui, *args, **opts) | return util.checksignature(self.fn)(ui, *args, **opts) | ||||
except error.SignatureError: | except error.SignatureError: | ||||
args = b' '.join([self.cmdname] + self.args) | args = b' '.join([self.cmdname] + self.args) | ||||
ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args)) | ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args)) | ||||
raise | raise | ||||
class lazyaliasentry(object): | class lazyaliasentry: | ||||
"""like a typical command entry (func, opts, help), but is lazy""" | """like a typical command entry (func, opts, help), but is lazy""" | ||||
def __init__(self, ui, name, definition, cmdtable, source): | def __init__(self, ui, name, definition, cmdtable, source): | ||||
self.ui = ui | self.ui = ui | ||||
self.name = name | self.name = name | ||||
self.definition = definition | self.definition = definition | ||||
self.cmdtable = cmdtable.copy() | self.cmdtable = cmdtable.copy() | ||||
self.source = source | self.source = source |
if leftside: | if leftside: | ||||
chars.reverse() | chars.reverse() | ||||
u = u''.join(chars).encode(_sysstr(encoding)) | u = u''.join(chars).encode(_sysstr(encoding)) | ||||
if leftside: | if leftside: | ||||
return ellipsis + u | return ellipsis + u | ||||
return u + ellipsis | return u + ellipsis | ||||
class normcasespecs(object): | class normcasespecs: | ||||
"""what a platform's normcase does to ASCII strings | """what a platform's normcase does to ASCII strings | ||||
This is specified per platform, and should be consistent with what normcase | This is specified per platform, and should be consistent with what normcase | ||||
on that platform actually does. | on that platform actually does. | ||||
lower: normcase lowercases ASCII strings | lower: normcase lowercases ASCII strings | ||||
upper: normcase uppercases ASCII strings | upper: normcase uppercases ASCII strings | ||||
other: the fallback function should always be called | other: the fallback function should always be called |
"""Byte-stringify exception in the same way as BaseException_str()""" | """Byte-stringify exception in the same way as BaseException_str()""" | ||||
if not exc.args: | if not exc.args: | ||||
return b'' | return b'' | ||||
if len(exc.args) == 1: | if len(exc.args) == 1: | ||||
return pycompat.bytestr(exc.args[0]) | return pycompat.bytestr(exc.args[0]) | ||||
return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args) | return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args) | ||||
class Hint(object): | class Hint: | ||||
"""Mix-in to provide a hint of an error | """Mix-in to provide a hint of an error | ||||
This should come first in the inheritance list to consume a hint and | This should come first in the inheritance list to consume a hint and | ||||
pass remaining arguments to the exception class. | pass remaining arguments to the exception class. | ||||
""" | """ | ||||
def __init__(self, *args, **kw): | def __init__(self, *args, **kw): | ||||
self.hint = kw.pop('hint', None) # type: Optional[bytes] | self.hint = kw.pop('hint', None) # type: Optional[bytes] |
# should be used. | # should be used. | ||||
# | # | ||||
# developer config: devel.legacy.exchange | # developer config: devel.legacy.exchange | ||||
exchange = ui.configlist(b'devel', b'legacy.exchange') | exchange = ui.configlist(b'devel', b'legacy.exchange') | ||||
forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange | forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange | ||||
return forcebundle1 or not op.remote.capable(b'bundle2') | return forcebundle1 or not op.remote.capable(b'bundle2') | ||||
class pushoperation(object): | class pushoperation: | ||||
"""A object that represent a single push operation | """A object that represent a single push operation | ||||
Its purpose is to carry push related state and very common operations. | Its purpose is to carry push related state and very common operations. | ||||
A new pushoperation should be created at the beginning of each push and | A new pushoperation should be created at the beginning of each push and | ||||
discarded afterward. | discarded afterward. | ||||
""" | """ | ||||
ui.status(bookmsgmap[action][0] % b) | ui.status(bookmsgmap[action][0] % b) | ||||
else: | else: | ||||
ui.warn(bookmsgmap[action][1] % b) | ui.warn(bookmsgmap[action][1] % b) | ||||
# discovery can have set the value form invalid entry | # discovery can have set the value form invalid entry | ||||
if pushop.bkresult is not None: | if pushop.bkresult is not None: | ||||
pushop.bkresult = 1 | pushop.bkresult = 1 | ||||
class pulloperation(object): | class pulloperation: | ||||
"""A object that represent a single pull operation | """A object that represent a single pull operation | ||||
It purpose is to carry pull related state and very common operation. | It purpose is to carry pull related state and very common operation. | ||||
A new should be created at the beginning of each pull and discarded | A new should be created at the beginning of each pull and discarded | ||||
afterward. | afterward. | ||||
""" | """ | ||||
currcls.__dict__[propname].func = wrap | currcls.__dict__[propname].func = wrap | ||||
break | break | ||||
if currcls is object: | if currcls is object: | ||||
raise AttributeError("type '%s' has no property '%s'" % (cls, propname)) | raise AttributeError("type '%s' has no property '%s'" % (cls, propname)) | ||||
class wrappedfunction(object): | class wrappedfunction: | ||||
'''context manager for temporarily wrapping a function''' | '''context manager for temporarily wrapping a function''' | ||||
def __init__(self, container, funcname, wrapper): | def __init__(self, container, funcname, wrapper): | ||||
assert callable(wrapper) | assert callable(wrapper) | ||||
self._container = container | self._container = container | ||||
self._funcname = funcname | self._funcname = funcname | ||||
self._wrapper = wrapper | self._wrapper = wrapper | ||||
error, | error, | ||||
extensions, | extensions, | ||||
registrar, | registrar, | ||||
) | ) | ||||
from hgdemandimport import tracing | from hgdemandimport import tracing | ||||
class exthelper(object): | class exthelper: | ||||
"""Helper for modular extension setup | """Helper for modular extension setup | ||||
A single helper should be instantiated for each module of an | A single helper should be instantiated for each module of an | ||||
extension, where a command or function needs to be wrapped, or a | extension, where a command or function needs to be wrapped, or a | ||||
command, extension hook, fileset, revset or template needs to be | command, extension hook, fileset, revset or template needs to be | ||||
registered. Helper methods are then used as decorators for | registered. Helper methods are then used as decorators for | ||||
these various purposes. If an extension spans multiple modules, | these various purposes. If an extension spans multiple modules, | ||||
all helper instances should be merged in the main module. | all helper instances should be merged in the main module. |
# -s/--str VALUE | # -s/--str VALUE | ||||
parsedopts.append((flag, args[pos + 1])) | parsedopts.append((flag, args[pos + 1])) | ||||
pos += 2 | pos += 2 | ||||
parsedargs.extend(args[pos:]) | parsedargs.extend(args[pos:]) | ||||
return parsedopts, parsedargs | return parsedopts, parsedargs | ||||
class customopt(object): # pytype: disable=ignored-metaclass | class customopt: # pytype: disable=ignored-metaclass | ||||
"""Manage defaults and mutations for any type of opt.""" | """Manage defaults and mutations for any type of opt.""" | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
def __init__(self, defaultvalue): | def __init__(self, defaultvalue): | ||||
self._defaultvalue = defaultvalue | self._defaultvalue = defaultvalue | ||||
def _isboolopt(self): | def _isboolopt(self): |
from .utils import storageutil | from .utils import storageutil | ||||
from .revlogutils import ( | from .revlogutils import ( | ||||
constants as revlog_constants, | constants as revlog_constants, | ||||
rewrite, | rewrite, | ||||
) | ) | ||||
@interfaceutil.implementer(repository.ifilestorage) | @interfaceutil.implementer(repository.ifilestorage) | ||||
class filelog(object): | class filelog: | ||||
def __init__(self, opener, path): | def __init__(self, opener, path): | ||||
self._revlog = revlog.revlog( | self._revlog = revlog.revlog( | ||||
opener, | opener, | ||||
# XXX should use the unencoded path | # XXX should use the unencoded path | ||||
target=(revlog_constants.KIND_FILELOG, path), | target=(revlog_constants.KIND_FILELOG, path), | ||||
radix=b'/'.join((b'data', path)), | radix=b'/'.join((b'data', path)), | ||||
censorable=True, | censorable=True, | ||||
) | ) |
_otherchangedlocaldeletedmsg = _( | _otherchangedlocaldeletedmsg = _( | ||||
b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n" | b"file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n" | ||||
b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n" | b"You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.\n" | ||||
b"What do you want to do?" | b"What do you want to do?" | ||||
b"$$ &Changed $$ &Deleted $$ &Unresolved" | b"$$ &Changed $$ &Deleted $$ &Unresolved" | ||||
) | ) | ||||
class absentfilectx(object): | class absentfilectx: | ||||
"""Represents a file that's ostensibly in a context but is actually not | """Represents a file that's ostensibly in a context but is actually not | ||||
present in it. | present in it. | ||||
This is here because it's very specific to the filemerge code for now -- | This is here because it's very specific to the filemerge code for now -- | ||||
other code is likely going to break with the values this returns.""" | other code is likely going to break with the values this returns.""" | ||||
def __init__(self, ctx, f): | def __init__(self, ctx, f): | ||||
self._ctx = ctx | self._ctx = ctx |
b'or': ormatch, | b'or': ormatch, | ||||
b'minus': minusmatch, | b'minus': minusmatch, | ||||
b'list': listmatch, | b'list': listmatch, | ||||
b'not': notmatch, | b'not': notmatch, | ||||
b'func': func, | b'func': func, | ||||
} | } | ||||
class matchctx(object): | class matchctx: | ||||
def __init__(self, basectx, ctx, cwd, badfn=None): | def __init__(self, basectx, ctx, cwd, badfn=None): | ||||
self._basectx = basectx | self._basectx = basectx | ||||
self.ctx = ctx | self.ctx = ctx | ||||
self._badfn = badfn | self._badfn = badfn | ||||
self._match = None | self._match = None | ||||
self._status = None | self._status = None | ||||
self.cwd = cwd | self.cwd = cwd | ||||
write() and data() functions | write() and data() functions | ||||
Returns False if the object is unsupported or must be pre-processed by | Returns False if the object is unsupported or must be pre-processed by | ||||
formatdate(), formatdict(), or formatlist(). | formatdate(), formatdict(), or formatlist(). | ||||
""" | """ | ||||
return isinstance(obj, (type(None), bool, int, int, float, bytes)) | return isinstance(obj, (type(None), bool, int, int, float, bytes)) | ||||
class _nullconverter(object): | class _nullconverter: | ||||
'''convert non-primitive data types to be processed by formatter''' | '''convert non-primitive data types to be processed by formatter''' | ||||
# set to True if context object should be stored as item | # set to True if context object should be stored as item | ||||
storecontext = False | storecontext = False | ||||
@staticmethod | @staticmethod | ||||
def wrapnested(data, tmpl, sep): | def wrapnested(data, tmpl, sep): | ||||
'''wrap nested data by appropriate type''' | '''wrap nested data by appropriate type''' | ||||
return dict(data) | return dict(data) | ||||
@staticmethod | @staticmethod | ||||
def formatlist(data, name, fmt, sep): | def formatlist(data, name, fmt, sep): | ||||
'''convert iterable to appropriate list format''' | '''convert iterable to appropriate list format''' | ||||
return list(data) | return list(data) | ||||
class baseformatter(object): | class baseformatter: | ||||
# set to True if the formater output a strict format that does not support | # set to True if the formater output a strict format that does not support | ||||
# arbitrary output in the stream. | # arbitrary output in the stream. | ||||
strict_format = False | strict_format = False | ||||
def __init__(self, ui, topic, opts, converter): | def __init__(self, ui, topic, opts, converter): | ||||
self._ui = ui | self._ui = ui | ||||
self._topic = topic | self._topic = topic | ||||
def _iteritems(data): | def _iteritems(data): | ||||
'''iterate key-value pairs in stable order''' | '''iterate key-value pairs in stable order''' | ||||
if isinstance(data, dict): | if isinstance(data, dict): | ||||
return sorted(data.items()) | return sorted(data.items()) | ||||
return data | return data | ||||
class _plainconverter(object): | class _plainconverter: | ||||
'''convert non-primitive data types to text''' | '''convert non-primitive data types to text''' | ||||
storecontext = False | storecontext = False | ||||
@staticmethod | @staticmethod | ||||
def wrapnested(data, tmpl, sep): | def wrapnested(data, tmpl, sep): | ||||
raise error.ProgrammingError(b'plainformatter should never be nested') | raise error.ProgrammingError(b'plainformatter should never be nested') | ||||
self._out.write(b' "%s": %s' % (k, u)) | self._out.write(b' "%s": %s' % (k, u)) | ||||
self._out.write(b"\n }") | self._out.write(b"\n }") | ||||
def end(self): | def end(self): | ||||
baseformatter.end(self) | baseformatter.end(self) | ||||
self._out.write(b"\n]\n") | self._out.write(b"\n]\n") | ||||
class _templateconverter(object): | class _templateconverter: | ||||
'''convert non-primitive data types to be processed by templater''' | '''convert non-primitive data types to be processed by templater''' | ||||
storecontext = True | storecontext = True | ||||
@staticmethod | @staticmethod | ||||
def wrapnested(data, tmpl, sep): | def wrapnested(data, tmpl, sep): | ||||
'''wrap nested data by templatable type''' | '''wrap nested data by templatable type''' | ||||
return templateutil.mappinglist(data, tmpl=tmpl, sep=sep) | return templateutil.mappinglist(data, tmpl=tmpl, sep=sep) | ||||
return self._symbolsused[0] | return self._symbolsused[0] | ||||
def end(self): | def end(self): | ||||
baseformatter.end(self) | baseformatter.end(self) | ||||
self._renderitem(b'docfooter', {}) | self._renderitem(b'docfooter', {}) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class templatespec(object): | class templatespec: | ||||
ref = attr.ib() | ref = attr.ib() | ||||
tmpl = attr.ib() | tmpl = attr.ib() | ||||
mapfile = attr.ib() | mapfile = attr.ib() | ||||
refargs = attr.ib(default=None) | refargs = attr.ib(default=None) | ||||
fp = attr.ib(default=None) | fp = attr.ib(default=None) | ||||
def empty_templatespec(): | def empty_templatespec(): |
# remove edges that ended | # remove edges that ended | ||||
remove = [p for p, c in edgemap.items() if c is None] | remove = [p for p, c in edgemap.items() if c is None] | ||||
for parent in remove: | for parent in remove: | ||||
del edgemap[parent] | del edgemap[parent] | ||||
seen.remove(parent) | seen.remove(parent) | ||||
@attr.s | @attr.s | ||||
class asciistate(object): | class asciistate: | ||||
"""State of ascii() graph rendering""" | """State of ascii() graph rendering""" | ||||
seen = attr.ib(init=False, default=attr.Factory(list)) | seen = attr.ib(init=False, default=attr.Factory(list)) | ||||
edges = attr.ib(init=False, default=attr.Factory(dict)) | edges = attr.ib(init=False, default=attr.Factory(dict)) | ||||
lastcoldiff = attr.ib(init=False, default=0) | lastcoldiff = attr.ib(init=False, default=0) | ||||
lastindex = attr.ib(init=False, default=0) | lastindex = attr.ib(init=False, default=0) | ||||
styles = attr.ib(init=False, default=attr.Factory(EDGES.copy)) | styles = attr.ib(init=False, default=attr.Factory(EDGES.copy)) | ||||
graphshorten = attr.ib(init=False, default=False) | graphshorten = attr.ib(init=False, default=False) |
mstart, mend = match.span() | mstart, mend = match.span() | ||||
linenum += body.count(b'\n', begin, mstart) + 1 | linenum += body.count(b'\n', begin, mstart) + 1 | ||||
lstart = body.rfind(b'\n', begin, mstart) + 1 or begin | lstart = body.rfind(b'\n', begin, mstart) + 1 or begin | ||||
begin = body.find(b'\n', mend) + 1 or len(body) + 1 | begin = body.find(b'\n', mend) + 1 or len(body) + 1 | ||||
lend = begin - 1 | lend = begin - 1 | ||||
yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] | yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] | ||||
class linestate(object): | class linestate: | ||||
def __init__(self, line, linenum, colstart, colend): | def __init__(self, line, linenum, colstart, colend): | ||||
self.line = line | self.line = line | ||||
self.linenum = linenum | self.linenum = linenum | ||||
self.colstart = colstart | self.colstart = colstart | ||||
self.colend = colend | self.colend = colend | ||||
def __hash__(self): | def __hash__(self): | ||||
return hash(self.line) | return hash(self.line) | ||||
yield (b'-', a[i]) | yield (b'-', a[i]) | ||||
elif tag == 'replace': | elif tag == 'replace': | ||||
for i in pycompat.xrange(alo, ahi): | for i in pycompat.xrange(alo, ahi): | ||||
yield (b'-', a[i]) | yield (b'-', a[i]) | ||||
for i in pycompat.xrange(blo, bhi): | for i in pycompat.xrange(blo, bhi): | ||||
yield (b'+', b[i]) | yield (b'+', b[i]) | ||||
class grepsearcher(object): | class grepsearcher: | ||||
"""Search files and revisions for lines matching the given pattern | """Search files and revisions for lines matching the given pattern | ||||
Options: | Options: | ||||
- all_files to search unchanged files at that revision. | - all_files to search unchanged files at that revision. | ||||
- diff to search files in the parent revision so diffs can be generated. | - diff to search files in the parent revision so diffs can be generated. | ||||
- follow to skip files across copies and renames. | - follow to skip files across copies and renames. | ||||
""" | """ | ||||
foi = [ | foi = [ | ||||
(b'spath', b'00changelog.i'), | (b'spath', b'00changelog.i'), | ||||
(b'spath', b'phaseroots'), # ! phase can change content at the same size | (b'spath', b'phaseroots'), # ! phase can change content at the same size | ||||
(b'spath', b'obsstore'), | (b'spath', b'obsstore'), | ||||
(b'path', b'bookmarks'), # ! bookmark can change content at the same size | (b'path', b'bookmarks'), # ! bookmark can change content at the same size | ||||
] | ] | ||||
class cachedlocalrepo(object): | class cachedlocalrepo: | ||||
"""Holds a localrepository that can be cached and reused.""" | """Holds a localrepository that can be cached and reused.""" | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
"""Create a new cached repo from an existing repo. | """Create a new cached repo from an existing repo. | ||||
We assume the passed in repo was recently created. If the | We assume the passed in repo was recently created. If the | ||||
repo has changed between when it was created and when it was | repo has changed between when it was created and when it was | ||||
turned into a cache, it may not refresh properly. | turned into a cache, it may not refresh properly. |
return hgwebdir_mod.hgwebdir(config, baseui=baseui) | return hgwebdir_mod.hgwebdir(config, baseui=baseui) | ||||
return hgweb_mod.hgweb(config, name=name, baseui=baseui) | return hgweb_mod.hgweb(config, name=name, baseui=baseui) | ||||
def hgwebdir(config, baseui=None): | def hgwebdir(config, baseui=None): | ||||
return hgwebdir_mod.hgwebdir(config, baseui=baseui) | return hgwebdir_mod.hgwebdir(config, baseui=baseui) | ||||
class httpservice(object): | class httpservice: | ||||
def __init__(self, ui, app, opts): | def __init__(self, ui, app, opts): | ||||
self.ui = ui | self.ui = ui | ||||
self.app = app | self.app = app | ||||
self.opts = opts | self.opts = opts | ||||
def init(self): | def init(self): | ||||
procutil.setsignalhandler() | procutil.setsignalhandler() | ||||
self.httpd = server.create_server(self.ui, self.app) | self.httpd = server.create_server(self.ui, self.app) |
Exception.__init__(self, pycompat.sysstr(message)) | Exception.__init__(self, pycompat.sysstr(message)) | ||||
self.code = code | self.code = code | ||||
if headers is None: | if headers is None: | ||||
headers = [] | headers = [] | ||||
self.headers = headers | self.headers = headers | ||||
self.message = message | self.message = message | ||||
class continuereader(object): | class continuereader: | ||||
"""File object wrapper to handle HTTP 100-continue. | """File object wrapper to handle HTTP 100-continue. | ||||
This is used by servers so they automatically handle Expect: 100-continue | This is used by servers so they automatically handle Expect: 100-continue | ||||
request headers. On first read of the request body, the 100 Continue | request headers. On first read of the request body, the 100 Continue | ||||
response is sent. This should trigger the client into actually sending | response is sent. This should trigger the client into actually sending | ||||
the request body. | the request body. | ||||
""" | """ | ||||
for pathel in reversed(pathitems): | for pathel in reversed(pathitems): | ||||
if not pathel or not urlel: | if not pathel or not urlel: | ||||
break | break | ||||
breadcrumb.append({b'url': urlel, b'name': pathel}) | breadcrumb.append({b'url': urlel, b'name': pathel}) | ||||
urlel = os.path.dirname(urlel) | urlel = os.path.dirname(urlel) | ||||
return templateutil.mappinglist(reversed(breadcrumb)) | return templateutil.mappinglist(reversed(breadcrumb)) | ||||
class requestcontext(object): | class requestcontext: | ||||
"""Holds state/context for an individual request. | """Holds state/context for an individual request. | ||||
Servers can be multi-threaded. Holding state on the WSGI application | Servers can be multi-threaded. Holding state on the WSGI application | ||||
is prone to race conditions. Instances of this class exist to hold | is prone to race conditions. Instances of this class exist to hold | ||||
mutable and race-free state for requests. | mutable and race-free state for requests. | ||||
""" | """ | ||||
def __init__(self, app, repo, req, res): | def __init__(self, app, repo, req, res): | ||||
def sendtemplate(self, name, **kwargs): | def sendtemplate(self, name, **kwargs): | ||||
"""Helper function to send a response generated from a template.""" | """Helper function to send a response generated from a template.""" | ||||
kwargs = pycompat.byteskwargs(kwargs) | kwargs = pycompat.byteskwargs(kwargs) | ||||
self.res.setbodygen(self.tmpl.generate(name, kwargs)) | self.res.setbodygen(self.tmpl.generate(name, kwargs)) | ||||
return self.res.sendresponse() | return self.res.sendresponse() | ||||
class hgweb(object): | class hgweb: | ||||
"""HTTP server for individual repositories. | """HTTP server for individual repositories. | ||||
Instances of this class serve HTTP responses for a particular | Instances of this class serve HTTP responses for a particular | ||||
repository. | repository. | ||||
Instances are typically used as WSGI applications. | Instances are typically used as WSGI applications. | ||||
Some servers are multi-threaded. On these servers, there may | Some servers are multi-threaded. On these servers, there may |
def indexentries( | def indexentries( | ||||
ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b'' | ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b'' | ||||
): | ): | ||||
args = (ui, repos, req, stripecount, sortcolumn, descending, subdir) | args = (ui, repos, req, stripecount, sortcolumn, descending, subdir) | ||||
return templateutil.mappinggenerator(_indexentriesgen, args=args) | return templateutil.mappinggenerator(_indexentriesgen, args=args) | ||||
class hgwebdir(object): | class hgwebdir: | ||||
"""HTTP server for multiple repositories. | """HTTP server for multiple repositories. | ||||
Given a configuration, different repositories will be served depending | Given a configuration, different repositories will be served depending | ||||
on the request path. | on the request path. | ||||
Instances are typically used as WSGI applications. | Instances are typically used as WSGI applications. | ||||
""" | """ | ||||
pycompat, | pycompat, | ||||
util, | util, | ||||
) | ) | ||||
from ..utils import ( | from ..utils import ( | ||||
urlutil, | urlutil, | ||||
) | ) | ||||
class multidict(object): | class multidict: | ||||
"""A dict like object that can store multiple values for a key. | """A dict like object that can store multiple values for a key. | ||||
Used to store parsed request parameters. | Used to store parsed request parameters. | ||||
This is inspired by WebOb's class of the same name. | This is inspired by WebOb's class of the same name. | ||||
""" | """ | ||||
def __init__(self): | def __init__(self): | ||||
return vals[0] | return vals[0] | ||||
def asdictoflists(self): | def asdictoflists(self): | ||||
return {k: list(v) for k, v in self._items.items()} | return {k: list(v) for k, v in self._items.items()} | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class parsedrequest(object): | class parsedrequest: | ||||
"""Represents a parsed WSGI request. | """Represents a parsed WSGI request. | ||||
Contains both parsed parameters as well as a handle on the input stream. | Contains both parsed parameters as well as a handle on the input stream. | ||||
""" | """ | ||||
# Request method. | # Request method. | ||||
method = attr.ib() | method = attr.ib() | ||||
# Full URL for this request. | # Full URL for this request. | ||||
querystring=querystring, | querystring=querystring, | ||||
qsparams=qsparams, | qsparams=qsparams, | ||||
headers=headers, | headers=headers, | ||||
bodyfh=bodyfh, | bodyfh=bodyfh, | ||||
rawenv=env, | rawenv=env, | ||||
) | ) | ||||
class offsettrackingwriter(object): | class offsettrackingwriter: | ||||
"""A file object like object that is append only and tracks write count. | """A file object like object that is append only and tracks write count. | ||||
Instances are bound to a callable. This callable is called with data | Instances are bound to a callable. This callable is called with data | ||||
whenever a ``write()`` is attempted. | whenever a ``write()`` is attempted. | ||||
Instances track the amount of written data so they can answer ``tell()`` | Instances track the amount of written data so they can answer ``tell()`` | ||||
requests. | requests. | ||||
def flush(self): | def flush(self): | ||||
pass | pass | ||||
def tell(self): | def tell(self): | ||||
return self._offset | return self._offset | ||||
class wsgiresponse(object): | class wsgiresponse: | ||||
"""Represents a response to a WSGI request. | """Represents a response to a WSGI request. | ||||
A response consists of a status line, headers, and a body. | A response consists of a status line, headers, and a body. | ||||
Consumers must populate the ``status`` and ``headers`` fields and | Consumers must populate the ``status`` and ``headers`` fields and | ||||
make a call to a ``setbody*()`` method before the response can be | make a call to a ``setbody*()`` method before the response can be | ||||
issued. | issued. | ||||
""" | """ | ||||
if '?' in uri: | if '?' in uri: | ||||
path, query = uri.split('?', 1) | path, query = uri.split('?', 1) | ||||
else: | else: | ||||
path, query = uri, r'' | path, query = uri, r'' | ||||
return urlreq.unquote(path), query | return urlreq.unquote(path), query | ||||
class _error_logger(object): | class _error_logger: | ||||
def __init__(self, handler): | def __init__(self, handler): | ||||
self.handler = handler | self.handler = handler | ||||
def flush(self): | def flush(self): | ||||
pass | pass | ||||
def write(self, str): | def write(self, str): | ||||
self.writelines(str.split(b'\n')) | self.writelines(str.split(b'\n')) | ||||
threading.active_count() # silence pyflakes and bypass demandimport | threading.active_count() # silence pyflakes and bypass demandimport | ||||
_mixin = socketserver.ThreadingMixIn | _mixin = socketserver.ThreadingMixIn | ||||
except ImportError: | except ImportError: | ||||
if util.safehasattr(os, b"fork"): | if util.safehasattr(os, b"fork"): | ||||
_mixin = socketserver.ForkingMixIn | _mixin = socketserver.ForkingMixIn | ||||
else: | else: | ||||
class _mixin(object): | class _mixin: | ||||
pass | pass | ||||
def openlog(opt, default): | def openlog(opt, default): | ||||
if opt and opt != b'-': | if opt and opt != b'-': | ||||
return open(opt, b'ab') | return open(opt, b'ab') | ||||
return default | return default | ||||
from ..utils import stringutil | from ..utils import stringutil | ||||
from . import webutil | from . import webutil | ||||
__all__ = [] | __all__ = [] | ||||
commands = {} | commands = {} | ||||
class webcommand(object): | class webcommand: | ||||
"""Decorator used to register a web command handler. | """Decorator used to register a web command handler. | ||||
The decorator takes as its positional arguments the name/path the | The decorator takes as its positional arguments the name/path the | ||||
command should be accessible under. | command should be accessible under. | ||||
When called, functions receive as arguments a ``requestcontext``, | When called, functions receive as arguments a ``requestcontext``, | ||||
``wsgirequest``, and a templater instance for generatoring output. | ``wsgirequest``, and a templater instance for generatoring output. | ||||
The functions should populate the ``rctx.res`` object with details | The functions should populate the ``rctx.res`` object with details |
while step <= firststep: | while step <= firststep: | ||||
step *= 10 | step *= 10 | ||||
while True: | while True: | ||||
yield 1 * step | yield 1 * step | ||||
yield 3 * step | yield 3 * step | ||||
step *= 10 | step *= 10 | ||||
class revnav(object): | class revnav: | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
"""Navigation generation object | """Navigation generation object | ||||
:repo: repo object we generate nav for | :repo: repo object we generate nav for | ||||
""" | """ | ||||
# used for hex generation | # used for hex generation | ||||
self._revlog = repo.changelog | self._revlog = repo.changelog | ||||
value = value.replace(b'\\', b'\\\\').replace(b'"', r'\"') | value = value.replace(b'\\', b'\\\\').replace(b'"', r'\"') | ||||
return b'%s="%s"' % (param, value) | return b'%s="%s"' % (param, value) | ||||
else: | else: | ||||
return b'%s=%s' % (param, value) | return b'%s=%s' % (param, value) | ||||
else: | else: | ||||
return param | return param | ||||
class Headers(object): | class Headers: | ||||
"""Manage a collection of HTTP response headers""" | """Manage a collection of HTTP response headers""" | ||||
def __init__(self, headers=None): | def __init__(self, headers=None): | ||||
headers = headers if headers is not None else [] | headers = headers if headers is not None else [] | ||||
if type(headers) is not list: | if type(headers) is not list: | ||||
raise TypeError(b"Headers must be a list of name/value tuples") | raise TypeError(b"Headers must be a list of name/value tuples") | ||||
self._headers = headers | self._headers = headers | ||||
if __debug__: | if __debug__: |
urlutil, | urlutil, | ||||
) | ) | ||||
urlerr = util.urlerr | urlerr = util.urlerr | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
# moved here from url.py to avoid a cycle | # moved here from url.py to avoid a cycle | ||||
class httpsendfile(object): | class httpsendfile: | ||||
"""This is a wrapper around the objects returned by python's "open". | """This is a wrapper around the objects returned by python's "open". | ||||
Its purpose is to send file-like objects via HTTP. | Its purpose is to send file-like objects via HTTP. | ||||
It do however not define a __len__ attribute because the length | It do however not define a __len__ attribute because the length | ||||
might be more than Py_ssize_t can handle. | might be more than Py_ssize_t can handle. | ||||
""" | """ | ||||
def __init__(self, ui, *args, **kwargs): | def __init__(self, ui, *args, **kwargs): |
n = 0 | n = 0 | ||||
for i in pycompat.xrange(0, len(value), valuelen): | for i in pycompat.xrange(0, len(value), valuelen): | ||||
n += 1 | n += 1 | ||||
result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen]))) | result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen]))) | ||||
return result | return result | ||||
class _multifile(object): | class _multifile: | ||||
def __init__(self, *fileobjs): | def __init__(self, *fileobjs): | ||||
for f in fileobjs: | for f in fileobjs: | ||||
if not util.safehasattr(f, b'length'): | if not util.safehasattr(f, b'length'): | ||||
raise ValueError( | raise ValueError( | ||||
b'_multifile only supports file objects that ' | b'_multifile only supports file objects that ' | ||||
b'have a length but this one does not:', | b'have a length but this one does not:', | ||||
type(f), | type(f), | ||||
f, | f, |
"""Unified peer interface for wire protocol version 2 peers.""" | """Unified peer interface for wire protocol version 2 peers.""" | ||||
apidescriptor = interfaceutil.Attribute( | apidescriptor = interfaceutil.Attribute( | ||||
"""Data structure holding description of server API.""" | """Data structure holding description of server API.""" | ||||
) | ) | ||||
@interfaceutil.implementer(ipeerbase) | @interfaceutil.implementer(ipeerbase) | ||||
class peer(object): | class peer: | ||||
"""Base class for peer repositories.""" | """Base class for peer repositories.""" | ||||
limitedarguments = False | limitedarguments = False | ||||
def capable(self, name): | def capable(self, name): | ||||
caps = self.capabilities() | caps = self.capabilities() | ||||
if name in caps: | if name in caps: | ||||
return True | return True |
if encoding.environ.get(b'HGREALINTERFACES'): | if encoding.environ.get(b'HGREALINTERFACES'): | ||||
from ..thirdparty.zope import interface as zi | from ..thirdparty.zope import interface as zi | ||||
Attribute = zi.Attribute | Attribute = zi.Attribute | ||||
Interface = zi.Interface | Interface = zi.Interface | ||||
implementer = zi.implementer | implementer = zi.implementer | ||||
else: | else: | ||||
class Attribute(object): | class Attribute: | ||||
def __init__(self, __name__, __doc__=b''): | def __init__(self, __name__, __doc__=b''): | ||||
pass | pass | ||||
class Interface(object): | class Interface: | ||||
def __init__( | def __init__( | ||||
self, name, bases=(), attrs=None, __doc__=None, __module__=None | self, name, bases=(), attrs=None, __doc__=None, __module__=None | ||||
): | ): | ||||
pass | pass | ||||
def implementer(*ifaces): | def implementer(*ifaces): | ||||
def wrapper(cls): | def wrapper(cls): | ||||
return cls | return cls | ||||
return wrapper | return wrapper |
httplib = util.httplib | httplib = util.httplib | ||||
urlerr = util.urlerr | urlerr = util.urlerr | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
DEBUG = None | DEBUG = None | ||||
class ConnectionManager(object): | class ConnectionManager: | ||||
""" | """ | ||||
The connection manager must be able to: | The connection manager must be able to: | ||||
* keep track of all existing | * keep track of all existing | ||||
""" | """ | ||||
def __init__(self): | def __init__(self): | ||||
self._lock = threading.Lock() | self._lock = threading.Lock() | ||||
self._hostmap = collections.defaultdict(list) # host -> [connection] | self._hostmap = collections.defaultdict(list) # host -> [connection] | ||||
def get_all(self, host=None): | def get_all(self, host=None): | ||||
if host: | if host: | ||||
return list(self._hostmap[host]) | return list(self._hostmap[host]) | ||||
else: | else: | ||||
return dict(self._hostmap) | return dict(self._hostmap) | ||||
class KeepAliveHandler(object): | class KeepAliveHandler: | ||||
def __init__(self, timeout=None): | def __init__(self, timeout=None): | ||||
self._cm = ConnectionManager() | self._cm = ConnectionManager() | ||||
self._timeout = timeout | self._timeout = timeout | ||||
self.requestscount = 0 | self.requestscount = 0 | ||||
self.sentbytescount = 0 | self.sentbytescount = 0 | ||||
#### Connection Management | #### Connection Management | ||||
def open_connections(self): | def open_connections(self): | ||||
return diff | return diff | ||||
def test_timeout(url): | def test_timeout(url): | ||||
global DEBUG | global DEBUG | ||||
dbbackup = DEBUG | dbbackup = DEBUG | ||||
class FakeLogger(object): | class FakeLogger: | ||||
def debug(self, msg, *args): | def debug(self, msg, *args): | ||||
print(msg % args) | print(msg % args) | ||||
info = warning = error = debug | info = warning = error = debug | ||||
DEBUG = FakeLogger() | DEBUG = FakeLogger() | ||||
print(b" fetching the file to establish a connection") | print(b" fetching the file to establish a connection") | ||||
fo = urlreq.urlopen(url) | fo = urlreq.urlopen(url) |
_llentry = struct.Struct(b'>II') | _llentry = struct.Struct(b'>II') | ||||
class LineLogError(Exception): | class LineLogError(Exception): | ||||
"""Error raised when something bad happens internally in linelog.""" | """Error raised when something bad happens internally in linelog.""" | ||||
@attr.s | @attr.s | ||||
class lineinfo(object): | class lineinfo: | ||||
# Introducing revision of this line. | # Introducing revision of this line. | ||||
rev = attr.ib() | rev = attr.ib() | ||||
# Line number for this line in its introducing revision. | # Line number for this line in its introducing revision. | ||||
linenum = attr.ib() | linenum = attr.ib() | ||||
# Private. Offset in the linelog program of this line. Used internally. | # Private. Offset in the linelog program of this line. Used internally. | ||||
_offset = attr.ib() | _offset = attr.ib() | ||||
@attr.s | @attr.s | ||||
class annotateresult(object): | class annotateresult: | ||||
rev = attr.ib() | rev = attr.ib() | ||||
lines = attr.ib() | lines = attr.ib() | ||||
_eof = attr.ib() | _eof = attr.ib() | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(self.lines) | return iter(self.lines) | ||||
class _llinstruction(object): # pytype: disable=ignored-metaclass | class _llinstruction: # pytype: disable=ignored-metaclass | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def __init__(self, op1, op2): | def __init__(self, op1, op2): | ||||
pass | pass | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
return _jge(op1, op2) | return _jge(op1, op2) | ||||
elif opcode == 1: | elif opcode == 1: | ||||
return _jl(op1, op2) | return _jl(op1, op2) | ||||
elif opcode == 2: | elif opcode == 2: | ||||
return _line(op1, op2) | return _line(op1, op2) | ||||
raise NotImplementedError(b'Unimplemented opcode %r' % opcode) | raise NotImplementedError(b'Unimplemented opcode %r' % opcode) | ||||
class linelog(object): | class linelog: | ||||
"""Efficient cache for per-line history information.""" | """Efficient cache for per-line history information.""" | ||||
def __init__(self, program=None, maxrev=0): | def __init__(self, program=None, maxrev=0): | ||||
if program is None: | if program is None: | ||||
# We pad the program with an extra leading EOF so that our | # We pad the program with an extra leading EOF so that our | ||||
# offsets will match the C code exactly. This means we can | # offsets will match the C code exactly. This means we can | ||||
# interoperate with the C code. | # interoperate with the C code. | ||||
program = [_eof(0, 0), _eof(0, 0)] | program = [_eof(0, 0), _eof(0, 0)] |
b'known', | b'known', | ||||
b'getbundle', | b'getbundle', | ||||
b'unbundle', | b'unbundle', | ||||
} | } | ||||
legacycaps = moderncaps.union({b'changegroupsubset'}) | legacycaps = moderncaps.union({b'changegroupsubset'}) | ||||
@interfaceutil.implementer(repository.ipeercommandexecutor) | @interfaceutil.implementer(repository.ipeercommandexecutor) | ||||
class localcommandexecutor(object): | class localcommandexecutor: | ||||
def __init__(self, peer): | def __init__(self, peer): | ||||
self._peer = peer | self._peer = peer | ||||
self._sent = False | self._sent = False | ||||
self._closed = False | self._closed = False | ||||
def __enter__(self): | def __enter__(self): | ||||
return self | return self | ||||
def makemain(**kwargs): | def makemain(**kwargs): | ||||
"""Produce a type conforming to ``ilocalrepositorymain``.""" | """Produce a type conforming to ``ilocalrepositorymain``.""" | ||||
return localrepository | return localrepository | ||||
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | ||||
class revlogfilestorage(object): | class revlogfilestorage: | ||||
"""File storage when using revlogs.""" | """File storage when using revlogs.""" | ||||
def file(self, path): | def file(self, path): | ||||
if path.startswith(b'/'): | if path.startswith(b'/'): | ||||
path = path[1:] | path = path[1:] | ||||
return filelog.filelog(self.svfs, path) | return filelog.filelog(self.svfs, path) | ||||
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) | ||||
class revlognarrowfilestorage(object): | class revlognarrowfilestorage: | ||||
"""File storage when using revlogs and narrow files.""" | """File storage when using revlogs and narrow files.""" | ||||
def file(self, path): | def file(self, path): | ||||
if path.startswith(b'/'): | if path.startswith(b'/'): | ||||
path = path[1:] | path = path[1:] | ||||
return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch) | return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch) | ||||
# functions can be wrapped. | # functions can be wrapped. | ||||
REPO_INTERFACES = [ | REPO_INTERFACES = [ | ||||
(repository.ilocalrepositorymain, lambda: makemain), | (repository.ilocalrepositorymain, lambda: makemain), | ||||
(repository.ilocalrepositoryfilestorage, lambda: makefilestorage), | (repository.ilocalrepositoryfilestorage, lambda: makefilestorage), | ||||
] | ] | ||||
@interfaceutil.implementer(repository.ilocalrepositorymain) | @interfaceutil.implementer(repository.ilocalrepositorymain) | ||||
class localrepository(object): | class localrepository: | ||||
"""Main class for representing local repositories. | """Main class for representing local repositories. | ||||
All local repositories are instances of this class. | All local repositories are instances of this class. | ||||
Constructed on its own, instances of this class are not usable as | Constructed on its own, instances of this class are not usable as | ||||
repository objects. To obtain a usable repository object, call | repository objects. To obtain a usable repository object, call | ||||
``hg.repository()``, ``localrepo.instance()``, or | ``hg.repository()``, ``localrepo.instance()``, or | ||||
``localrepo.makelocalrepository()``. The latter is the lowest-level. | ``localrepo.makelocalrepository()``. The latter is the lowest-level. | ||||
@filteredpropertycache | @filteredpropertycache | ||||
def _tagscache(self): | def _tagscache(self): | ||||
"""Returns a tagscache object that contains various tags related | """Returns a tagscache object that contains various tags related | ||||
caches.""" | caches.""" | ||||
# This simplifies its cache management by having one decorated | # This simplifies its cache management by having one decorated | ||||
# function (this one) and the rest simply fetch things from it. | # function (this one) and the rest simply fetch things from it. | ||||
class tagscache(object): | class tagscache: | ||||
def __init__(self): | def __init__(self): | ||||
# These two define the set of tags for this repository. tags | # These two define the set of tags for this repository. tags | ||||
# maps tag name to node; tagtypes maps tag name to 'global' or | # maps tag name to node; tagtypes maps tag name to 'global' or | ||||
# 'local'. (Global tags are defined by .hgtags across all | # 'local'. (Global tags are defined by .hgtags across all | ||||
# heads, and local tags are defined in .hg/localtags.) | # heads, and local tags are defined in .hg/localtags.) | ||||
# They constitute the in-memory cache of tags. | # They constitute the in-memory cache of tags. | ||||
self.tags = self.tagtypes = None | self.tags = self.tagtypes = None | ||||
# Perform any cleanup on the instance. | # Perform any cleanup on the instance. | ||||
repo.close() | repo.close() | ||||
# Our strategy is to replace the type of the object with one that | # Our strategy is to replace the type of the object with one that | ||||
# has all attribute lookups result in error. | # has all attribute lookups result in error. | ||||
# | # | ||||
# But we have to allow the close() method because some constructors | # But we have to allow the close() method because some constructors | ||||
# of repos call close() on repo references. | # of repos call close() on repo references. | ||||
class poisonedrepository(object): | class poisonedrepository: | ||||
def __getattribute__(self, item): | def __getattribute__(self, item): | ||||
if item == 'close': | if item == 'close': | ||||
return object.__getattribute__(self, item) | return object.__getattribute__(self, item) | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'repo instances should not be used after unshare' | b'repo instances should not be used after unshare' | ||||
) | ) | ||||
def close(self): | def close(self): | ||||
pass | pass | ||||
# We may have a repoview, which intercepts __setattr__. So be sure | # We may have a repoview, which intercepts __setattr__. So be sure | ||||
# we operate at the lowest level possible. | # we operate at the lowest level possible. | ||||
object.__setattr__(repo, '__class__', poisonedrepository) | object.__setattr__(repo, '__class__', poisonedrepository) |
ui.warn(_(b"got lock after %d seconds\n") % l.delay) | ui.warn(_(b"got lock after %d seconds\n") % l.delay) | ||||
else: | else: | ||||
ui.debug(b"got lock after %d seconds\n" % l.delay) | ui.debug(b"got lock after %d seconds\n" % l.delay) | ||||
if l.acquirefn: | if l.acquirefn: | ||||
l.acquirefn() | l.acquirefn() | ||||
return l | return l | ||||
class lock(object): | class lock: | ||||
"""An advisory lock held by one process to control access to a set | """An advisory lock held by one process to control access to a set | ||||
of files. Non-cooperating processes or incorrectly written scripts | of files. Non-cooperating processes or incorrectly written scripts | ||||
can ignore Mercurial's locking scheme and stomp all over the | can ignore Mercurial's locking scheme and stomp all over the | ||||
repository, so don't do that. | repository, so don't do that. | ||||
Typically used via localrepository.lock() to lock the repository | Typically used via localrepository.lock() to lock the repository | ||||
store (.hg/store/) or localrepository.wlock() to lock everything | store (.hg/store/) or localrepository.wlock() to lock everything | ||||
else under .hg/.""" | else under .hg/.""" |
submatch, | submatch, | ||||
changes=changes, | changes=changes, | ||||
stat=stat, | stat=stat, | ||||
fp=fp, | fp=fp, | ||||
prefix=subprefix, | prefix=subprefix, | ||||
) | ) | ||||
class changesetdiffer(object): | class changesetdiffer: | ||||
"""Generate diff of changeset with pre-configured filtering functions""" | """Generate diff of changeset with pre-configured filtering functions""" | ||||
def _makefilematcher(self, ctx): | def _makefilematcher(self, ctx): | ||||
return scmutil.matchall(ctx.repo()) | return scmutil.matchall(ctx.repo()) | ||||
def _makehunksfilter(self, ctx): | def _makehunksfilter(self, ctx): | ||||
return None | return None | ||||
labels.append(b'changeset.obsolete') | labels.append(b'changeset.obsolete') | ||||
if ctx.isunstable(): | if ctx.isunstable(): | ||||
labels.append(b'changeset.unstable') | labels.append(b'changeset.unstable') | ||||
for instability in ctx.instabilities(): | for instability in ctx.instabilities(): | ||||
labels.append(b'instability.%s' % instability) | labels.append(b'instability.%s' % instability) | ||||
return b' '.join(labels) | return b' '.join(labels) | ||||
class changesetprinter(object): | class changesetprinter: | ||||
'''show changeset information when templating not requested.''' | '''show changeset information when templating not requested.''' | ||||
def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False): | def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
self.buffered = buffered | self.buffered = buffered | ||||
self._differ = differ or changesetdiffer() | self._differ = differ or changesetdiffer() | ||||
self._diffopts = patch.diffallopts(ui, diffopts) | self._diffopts = patch.diffallopts(ui, diffopts) | ||||
if not spec.ref and not spec.tmpl and not spec.mapfile: | if not spec.ref and not spec.tmpl and not spec.mapfile: | ||||
return changesetprinter(ui, repo, *postargs) | return changesetprinter(ui, repo, *postargs) | ||||
return changesettemplater(ui, repo, spec, *postargs) | return changesettemplater(ui, repo, spec, *postargs) | ||||
@attr.s | @attr.s | ||||
class walkopts(object): | class walkopts: | ||||
"""Options to configure a set of revisions and file matcher factory | """Options to configure a set of revisions and file matcher factory | ||||
to scan revision/file history | to scan revision/file history | ||||
""" | """ | ||||
# raw command-line parameters, which a matcher will be built from | # raw command-line parameters, which a matcher will be built from | ||||
pats = attr.ib() | pats = attr.ib() | ||||
opts = attr.ib() | opts = attr.ib() | ||||
pid = procutil.getpid() | pid = procutil.getpid() | ||||
return b'%s (%d)> %s' % (date, pid, msg) | return b'%s (%d)> %s' % (date, pid, msg) | ||||
def _matchevent(event, tracked): | def _matchevent(event, tracked): | ||||
return b'*' in tracked or event in tracked | return b'*' in tracked or event in tracked | ||||
class filelogger(object): | class filelogger: | ||||
"""Basic logger backed by physical file with optional rotation""" | """Basic logger backed by physical file with optional rotation""" | ||||
def __init__(self, vfs, name, tracked, maxfiles=0, maxsize=0): | def __init__(self, vfs, name, tracked, maxfiles=0, maxsize=0): | ||||
self._vfs = vfs | self._vfs = vfs | ||||
self._name = name | self._name = name | ||||
self._trackedevents = set(tracked) | self._trackedevents = set(tracked) | ||||
self._maxfiles = maxfiles | self._maxfiles = maxfiles | ||||
self._maxsize = maxsize | self._maxsize = maxsize | ||||
fp.write(line) | fp.write(line) | ||||
except IOError as err: | except IOError as err: | ||||
ui.debug( | ui.debug( | ||||
b'cannot write to %s: %s\n' | b'cannot write to %s: %s\n' | ||||
% (self._name, stringutil.forcebytestr(err)) | % (self._name, stringutil.forcebytestr(err)) | ||||
) | ) | ||||
class fileobjectlogger(object): | class fileobjectlogger: | ||||
"""Basic logger backed by file-like object""" | """Basic logger backed by file-like object""" | ||||
def __init__(self, fp, tracked): | def __init__(self, fp, tracked): | ||||
self._fp = fp | self._fp = fp | ||||
self._trackedevents = set(tracked) | self._trackedevents = set(tracked) | ||||
def tracked(self, event): | def tracked(self, event): | ||||
return _matchevent(event, self._trackedevents) | return _matchevent(event, self._trackedevents) | ||||
def log(self, ui, event, msg, opts): | def log(self, ui, event, msg, opts): | ||||
line = _formatlogline(msg) | line = _formatlogline(msg) | ||||
try: | try: | ||||
self._fp.write(line) | self._fp.write(line) | ||||
self._fp.flush() | self._fp.flush() | ||||
except IOError as err: | except IOError as err: | ||||
ui.debug( | ui.debug( | ||||
b'cannot write to %s: %s\n' | b'cannot write to %s: %s\n' | ||||
% ( | % ( | ||||
stringutil.forcebytestr(self._fp.name), | stringutil.forcebytestr(self._fp.name), | ||||
stringutil.forcebytestr(err), | stringutil.forcebytestr(err), | ||||
) | ) | ||||
) | ) | ||||
class proxylogger(object): | class proxylogger: | ||||
"""Forward log events to another logger to be set later""" | """Forward log events to another logger to be set later""" | ||||
def __init__(self): | def __init__(self): | ||||
self.logger = None | self.logger = None | ||||
def tracked(self, event): | def tracked(self, event): | ||||
return self.logger is not None and self.logger.tracked(event) | return self.logger is not None and self.logger.tracked(event) | ||||
def log(self, ui, event, msg, opts): | def log(self, ui, event, msg, opts): | ||||
assert self.logger is not None | assert self.logger is not None | ||||
self.logger.log(ui, event, msg, opts) | self.logger.log(ui, event, msg, opts) |
p.enable(subcalls=True, builtins=True) | p.enable(subcalls=True, builtins=True) | ||||
try: | try: | ||||
f(*args, **kwds) | f(*args, **kwds) | ||||
finally: | finally: | ||||
p.disable() | p.disable() | ||||
return Stats(p.getstats()) | return Stats(p.getstats()) | ||||
class Stats(object): | class Stats: | ||||
"""XXX docstring""" | """XXX docstring""" | ||||
def __init__(self, data): | def __init__(self, data): | ||||
self.data = data | self.data = data | ||||
def sort(self, crit="inlinetime"): | def sort(self, crit="inlinetime"): | ||||
"""XXX docstring""" | """XXX docstring""" | ||||
# profiler_entries isn't defined when running under PyPy. | # profiler_entries isn't defined when running under PyPy. |
else: | else: | ||||
return b'%s %s:%d' % ( | return b'%s %s:%d' % ( | ||||
pycompat.sysbytes(code.co_name), | pycompat.sysbytes(code.co_name), | ||||
pycompat.sysbytes(code.co_filename), | pycompat.sysbytes(code.co_filename), | ||||
code.co_firstlineno, | code.co_firstlineno, | ||||
) | ) | ||||
class KCacheGrind(object): | class KCacheGrind: | ||||
def __init__(self, profiler): | def __init__(self, profiler): | ||||
self.data = profiler.getstats() | self.data = profiler.getstats() | ||||
self.out_file = None | self.out_file = None | ||||
def output(self, out_file): | def output(self, out_file): | ||||
self.out_file = out_file | self.out_file = out_file | ||||
out_file.write(b'events: Ticks\n') | out_file.write(b'events: Ticks\n') | ||||
self._print_summary() | self._print_summary() |
# if this is changed to support newlines in filenames, | # if this is changed to support newlines in filenames, | ||||
# be sure to check the templates/ dir again (especially *-raw.tmpl) | # be sure to check the templates/ dir again (especially *-raw.tmpl) | ||||
lines.append(b"%s\0%s%s\n" % (f, hex(n), fl)) | lines.append(b"%s\0%s%s\n" % (f, hex(n), fl)) | ||||
_checkforbidden(files) | _checkforbidden(files) | ||||
return b''.join(lines) | return b''.join(lines) | ||||
class lazymanifestiter(object): | class lazymanifestiter: | ||||
def __init__(self, lm): | def __init__(self, lm): | ||||
self.pos = 0 | self.pos = 0 | ||||
self.lm = lm | self.lm = lm | ||||
def __iter__(self): | def __iter__(self): | ||||
return self | return self | ||||
def next(self): | def next(self): | ||||
try: | try: | ||||
data, pos = self.lm._get(self.pos) | data, pos = self.lm._get(self.pos) | ||||
except IndexError: | except IndexError: | ||||
raise StopIteration | raise StopIteration | ||||
if pos == -1: | if pos == -1: | ||||
self.pos += 1 | self.pos += 1 | ||||
return data[0] | return data[0] | ||||
self.pos += 1 | self.pos += 1 | ||||
zeropos = data.find(b'\x00', pos) | zeropos = data.find(b'\x00', pos) | ||||
return data[pos:zeropos] | return data[pos:zeropos] | ||||
__next__ = next | __next__ = next | ||||
class lazymanifestiterentries(object): | class lazymanifestiterentries: | ||||
def __init__(self, lm): | def __init__(self, lm): | ||||
self.lm = lm | self.lm = lm | ||||
self.pos = 0 | self.pos = 0 | ||||
def __iter__(self): | def __iter__(self): | ||||
return self | return self | ||||
def next(self): | def next(self): | ||||
def _cmp(a, b): | def _cmp(a, b): | ||||
return (a > b) - (a < b) | return (a > b) - (a < b) | ||||
_manifestflags = {b'', b'l', b't', b'x'} | _manifestflags = {b'', b'l', b't', b'x'} | ||||
class _lazymanifest(object): | class _lazymanifest: | ||||
"""A pure python manifest backed by a byte string. It is supplimented with | """A pure python manifest backed by a byte string. It is supplimented with | ||||
internal lists as it is modified, until it is compacted back to a pure byte | internal lists as it is modified, until it is compacted back to a pure byte | ||||
string. | string. | ||||
``data`` is the initial manifest data. | ``data`` is the initial manifest data. | ||||
``positions`` is a list of offsets, one per manifest entry. Positive | ``positions`` is a list of offsets, one per manifest entry. Positive | ||||
values are offsets into ``data``, negative values are offsets into the | values are offsets into ``data``, negative values are offsets into the | ||||
try: | try: | ||||
_lazymanifest = parsers.lazymanifest | _lazymanifest = parsers.lazymanifest | ||||
except AttributeError: | except AttributeError: | ||||
pass | pass | ||||
@interfaceutil.implementer(repository.imanifestdict) | @interfaceutil.implementer(repository.imanifestdict) | ||||
class manifestdict(object): | class manifestdict: | ||||
def __init__(self, nodelen, data=b''): | def __init__(self, nodelen, data=b''): | ||||
self._nodelen = nodelen | self._nodelen = nodelen | ||||
self._lm = _lazymanifest(nodelen, data) | self._lm = _lazymanifest(nodelen, data) | ||||
def __getitem__(self, key): | def __getitem__(self, key): | ||||
return self._lm[key][0] | return self._lm[key][0] | ||||
def find(self, key): | def find(self, key): | ||||
else: | else: | ||||
return b'', f | return b'', f | ||||
_noop = lambda s: None | _noop = lambda s: None | ||||
@interfaceutil.implementer(repository.imanifestdict) | @interfaceutil.implementer(repository.imanifestdict) | ||||
class treemanifest(object): | class treemanifest: | ||||
def __init__(self, nodeconstants, dir=b'', text=b''): | def __init__(self, nodeconstants, dir=b'', text=b''): | ||||
self._dir = dir | self._dir = dir | ||||
self.nodeconstants = nodeconstants | self.nodeconstants = nodeconstants | ||||
self._node = self.nodeconstants.nullid | self._node = self.nodeconstants.nullid | ||||
self._nodelen = self.nodeconstants.nodelen | self._nodelen = self.nodeconstants.nodelen | ||||
self._loadfunc = _noop | self._loadfunc = _noop | ||||
self._copyfunc = _noop | self._copyfunc = _noop | ||||
self._dirty = False | self._dirty = False | ||||
MAXCOMPRESSION = 3 | MAXCOMPRESSION = 3 | ||||
class FastdeltaUnavailable(Exception): | class FastdeltaUnavailable(Exception): | ||||
"""Exception raised when fastdelta isn't usable on a manifest.""" | """Exception raised when fastdelta isn't usable on a manifest.""" | ||||
@interfaceutil.implementer(repository.imanifeststorage) | @interfaceutil.implementer(repository.imanifeststorage) | ||||
class manifestrevlog(object): | class manifestrevlog: | ||||
"""A revlog that stores manifest texts. This is responsible for caching the | """A revlog that stores manifest texts. This is responsible for caching the | ||||
full-text manifest contents. | full-text manifest contents. | ||||
""" | """ | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
nodeconstants, | nodeconstants, | ||||
opener, | opener, | ||||
return self._revlog.opener | return self._revlog.opener | ||||
@opener.setter | @opener.setter | ||||
def opener(self, value): | def opener(self, value): | ||||
self._revlog.opener = value | self._revlog.opener = value | ||||
@interfaceutil.implementer(repository.imanifestlog) | @interfaceutil.implementer(repository.imanifestlog) | ||||
class manifestlog(object): | class manifestlog: | ||||
"""A collection class representing the collection of manifest snapshots | """A collection class representing the collection of manifest snapshots | ||||
referenced by commits in the repository. | referenced by commits in the repository. | ||||
In this situation, 'manifest' refers to the abstract concept of a snapshot | In this situation, 'manifest' refers to the abstract concept of a snapshot | ||||
of the list of files in the given commit. Consumers of the output of this | of the list of files in the given commit. Consumers of the output of this | ||||
class do not care about the implementation details of the actual manifests | class do not care about the implementation details of the actual manifests | ||||
they receive (i.e. tree or flat or lazily loaded, etc).""" | they receive (i.e. tree or flat or lazily loaded, etc).""" | ||||
def rev(self, node): | def rev(self, node): | ||||
return self._rootstore.rev(node) | return self._rootstore.rev(node) | ||||
def update_caches(self, transaction): | def update_caches(self, transaction): | ||||
return self._rootstore._revlog.update_caches(transaction=transaction) | return self._rootstore._revlog.update_caches(transaction=transaction) | ||||
@interfaceutil.implementer(repository.imanifestrevisionwritable) | @interfaceutil.implementer(repository.imanifestrevisionwritable) | ||||
class memmanifestctx(object): | class memmanifestctx: | ||||
def __init__(self, manifestlog): | def __init__(self, manifestlog): | ||||
self._manifestlog = manifestlog | self._manifestlog = manifestlog | ||||
self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen) | self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen) | ||||
def _storage(self): | def _storage(self): | ||||
return self._manifestlog.getstorage(b'') | return self._manifestlog.getstorage(b'') | ||||
def copy(self): | def copy(self): | ||||
p2, | p2, | ||||
added, | added, | ||||
removed, | removed, | ||||
match=match, | match=match, | ||||
) | ) | ||||
@interfaceutil.implementer(repository.imanifestrevisionstored) | @interfaceutil.implementer(repository.imanifestrevisionstored) | ||||
class manifestctx(object): | class manifestctx: | ||||
"""A class representing a single revision of a manifest, including its | """A class representing a single revision of a manifest, including its | ||||
contents, its parent revs, and its linkrev. | contents, its parent revs, and its linkrev. | ||||
""" | """ | ||||
def __init__(self, manifestlog, node): | def __init__(self, manifestlog, node): | ||||
self._manifestlog = manifestlog | self._manifestlog = manifestlog | ||||
self._data = None | self._data = None | ||||
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r)) | d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r)) | ||||
return manifestdict(store.nodeconstants.nodelen, d) | return manifestdict(store.nodeconstants.nodelen, d) | ||||
def find(self, key): | def find(self, key): | ||||
return self.read().find(key) | return self.read().find(key) | ||||
@interfaceutil.implementer(repository.imanifestrevisionwritable) | @interfaceutil.implementer(repository.imanifestrevisionwritable) | ||||
class memtreemanifestctx(object): | class memtreemanifestctx: | ||||
def __init__(self, manifestlog, dir=b''): | def __init__(self, manifestlog, dir=b''): | ||||
self._manifestlog = manifestlog | self._manifestlog = manifestlog | ||||
self._dir = dir | self._dir = dir | ||||
self._treemanifest = treemanifest(manifestlog.nodeconstants) | self._treemanifest = treemanifest(manifestlog.nodeconstants) | ||||
def _storage(self): | def _storage(self): | ||||
return self._manifestlog.getstorage(b'') | return self._manifestlog.getstorage(b'') | ||||
added, | added, | ||||
removed, | removed, | ||||
readtree=readtree, | readtree=readtree, | ||||
match=match, | match=match, | ||||
) | ) | ||||
@interfaceutil.implementer(repository.imanifestrevisionstored) | @interfaceutil.implementer(repository.imanifestrevisionstored) | ||||
class treemanifestctx(object): | class treemanifestctx: | ||||
def __init__(self, manifestlog, dir, node): | def __init__(self, manifestlog, dir, node): | ||||
self._manifestlog = manifestlog | self._manifestlog = manifestlog | ||||
self._dir = dir | self._dir = dir | ||||
self._data = None | self._data = None | ||||
self._node = node | self._node = node | ||||
# TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that | # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that |
% (pat, stringutil.forcebytestr(inst.strerror)) | % (pat, stringutil.forcebytestr(inst.strerror)) | ||||
) | ) | ||||
continue | continue | ||||
# else: re or relre - which cannot be normalized | # else: re or relre - which cannot be normalized | ||||
kindpats.append((kind, pat, b'')) | kindpats.append((kind, pat, b'')) | ||||
return kindpats | return kindpats | ||||
class basematcher(object): | class basematcher: | ||||
def __init__(self, badfn=None): | def __init__(self, badfn=None): | ||||
if badfn is not None: | if badfn is not None: | ||||
self.bad = badfn | self.bad = badfn | ||||
def __call__(self, fn): | def __call__(self, fn): | ||||
return self.matchfn(fn) | return self.matchfn(fn) | ||||
# Callbacks related to how the matcher is used by dirstate.walk. | # Callbacks related to how the matcher is used by dirstate.walk. | ||||
@encoding.strmethod | @encoding.strmethod | ||||
def __repr__(self): | def __repr__(self): | ||||
return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats) | return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats) | ||||
# This is basically a reimplementation of pathutil.dirs that stores the | # This is basically a reimplementation of pathutil.dirs that stores the | ||||
# children instead of just a count of them, plus a small optional optimization | # children instead of just a count of them, plus a small optional optimization | ||||
# to avoid some directories we don't need. | # to avoid some directories we don't need. | ||||
class _dirchildren(object): | class _dirchildren: | ||||
def __init__(self, paths, onlyinclude=None): | def __init__(self, paths, onlyinclude=None): | ||||
self._dirs = {} | self._dirs = {} | ||||
self._onlyinclude = onlyinclude or [] | self._onlyinclude = onlyinclude or [] | ||||
addpath = self.addpath | addpath = self.addpath | ||||
for f in paths: | for f in paths: | ||||
addpath(f) | addpath(f) | ||||
def addpath(self, path): | def addpath(self, path): |
fixws = bdiff.fixws | fixws = bdiff.fixws | ||||
patches = mpatch.patches | patches = mpatch.patches | ||||
patchedsize = mpatch.patchedsize | patchedsize = mpatch.patchedsize | ||||
textdiff = bdiff.bdiff | textdiff = bdiff.bdiff | ||||
splitnewlines = bdiff.splitnewlines | splitnewlines = bdiff.splitnewlines | ||||
# TODO: this looks like it could be an attrs, which might help pytype | # TODO: this looks like it could be an attrs, which might help pytype | ||||
class diffopts(object): | class diffopts: | ||||
"""context is the number of context lines | """context is the number of context lines | ||||
text treats all files as text | text treats all files as text | ||||
showfunc enables diff -p output | showfunc enables diff -p output | ||||
git enables the git extended patch format | git enables the git extended patch format | ||||
nodates removes dates from diff headers | nodates removes dates from diff headers | ||||
nobinary ignores binary files | nobinary ignores binary files | ||||
noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode) | noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode) | ||||
ignorews ignores all whitespace changes in the diff | ignorews ignores all whitespace changes in the diff |
return ( | return ( | ||||
repo.wvfs.audit.check(f) | repo.wvfs.audit.check(f) | ||||
and repo.wvfs.isfileorlink(f) | and repo.wvfs.isfileorlink(f) | ||||
and repo.dirstate.normalize(f) not in repo.dirstate | and repo.dirstate.normalize(f) not in repo.dirstate | ||||
and mctx[f2].cmp(wctx[f]) | and mctx[f2].cmp(wctx[f]) | ||||
) | ) | ||||
class _unknowndirschecker(object): | class _unknowndirschecker: | ||||
""" | """ | ||||
Look for any unknown files or directories that may have a path conflict | Look for any unknown files or directories that may have a path conflict | ||||
with a file. If any path prefix of the file exists as a file or link, | with a file. If any path prefix of the file exists as a file or link, | ||||
then it conflicts. If the file itself is a directory that contains any | then it conflicts. If the file itself is a directory that contains any | ||||
file that is not tracked, then it conflicts. | file that is not tracked, then it conflicts. | ||||
Returns the shortest path at which a conflict occurs, or None if there is | Returns the shortest path at which a conflict occurs, or None if there is | ||||
no conflict. | no conflict. | ||||
) | ) | ||||
hint = _(b'merging in the other direction may work') | hint = _(b'merging in the other direction may work') | ||||
raise error.Abort(msg % f, hint=hint) | raise error.Abort(msg % f, hint=hint) | ||||
else: | else: | ||||
msg = _(b'conflict in file \'%s\' is outside narrow clone') | msg = _(b'conflict in file \'%s\' is outside narrow clone') | ||||
raise error.StateError(msg % f) | raise error.StateError(msg % f) | ||||
class mergeresult(object): | class mergeresult: | ||||
"""An object representing result of merging manifests. | """An object representing result of merging manifests. | ||||
It has information about what actions need to be performed on dirstate | It has information about what actions need to be performed on dirstate | ||||
mapping of divergent renames and other such cases.""" | mapping of divergent renames and other such cases.""" | ||||
def __init__(self): | def __init__(self): | ||||
""" | """ | ||||
filemapping: dict of filename as keys and action related info as values | filemapping: dict of filename as keys and action related info as values | ||||
ctx.rev(), | ctx.rev(), | ||||
matchfiles(repo, files), | matchfiles(repo, files), | ||||
) | ) | ||||
], | ], | ||||
) | ) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class updateresult(object): | class updateresult: | ||||
updatedcount = attr.ib() | updatedcount = attr.ib() | ||||
mergedcount = attr.ib() | mergedcount = attr.ib() | ||||
removedcount = attr.ib() | removedcount = attr.ib() | ||||
unresolvedcount = attr.ib() | unresolvedcount = attr.ib() | ||||
def isempty(self): | def isempty(self): | ||||
return not ( | return not ( | ||||
self.updatedcount | self.updatedcount |
# This record was release in 3.7 and usage was removed in 5.6 | # This record was release in 3.7 and usage was removed in 5.6 | ||||
LEGACY_MERGE_DRIVER_MERGE = b'D' | LEGACY_MERGE_DRIVER_MERGE = b'D' | ||||
CHANGE_ADDED = b'added' | CHANGE_ADDED = b'added' | ||||
CHANGE_REMOVED = b'removed' | CHANGE_REMOVED = b'removed' | ||||
CHANGE_MODIFIED = b'modified' | CHANGE_MODIFIED = b'modified' | ||||
class MergeAction(object): | class MergeAction: | ||||
"""represent an "action" merge need to take for a given file | """represent an "action" merge need to take for a given file | ||||
Attributes: | Attributes: | ||||
_short: internal representation used to identify each action | _short: internal representation used to identify each action | ||||
no_op: True if the action does affect the file content or tracking status | no_op: True if the action does affect the file content or tracking status | ||||
CONVERT_MERGE_ACTIONS = ( | CONVERT_MERGE_ACTIONS = ( | ||||
ACTION_MERGE, | ACTION_MERGE, | ||||
ACTION_DIR_RENAME_MOVE_LOCAL, | ACTION_DIR_RENAME_MOVE_LOCAL, | ||||
ACTION_CHANGED_DELETED, | ACTION_CHANGED_DELETED, | ||||
ACTION_DELETED_CHANGED, | ACTION_DELETED_CHANGED, | ||||
) | ) | ||||
class _mergestate_base(object): | class _mergestate_base: | ||||
"""track 3-way merge state of individual files | """track 3-way merge state of individual files | ||||
The merge state is stored on disk when needed. Two files are used: one with | The merge state is stored on disk when needed. Two files are used: one with | ||||
an old format (version 1), and one with a new format (version 2). Version 2 | an old format (version 1), and one with a new format (version 2). Version 2 | ||||
stores a superset of the data in version 1, including new kinds of records | stores a superset of the data in version 1, including new kinds of records | ||||
in the future. For more about the new format, see the documentation for | in the future. For more about the new format, see the documentation for | ||||
`_readrecordsv2`. | `_readrecordsv2`. | ||||
) | ) | ||||
from .revlogutils import ( | from .revlogutils import ( | ||||
flagutil as sidedataflag, | flagutil as sidedataflag, | ||||
sidedata as sidedatamod, | sidedata as sidedatamod, | ||||
) | ) | ||||
class ChangingFiles(object): | class ChangingFiles: | ||||
"""A class recording the changes made to files by a changeset | """A class recording the changes made to files by a changeset | ||||
Actions performed on files are gathered into 3 sets: | Actions performed on files are gathered into 3 sets: | ||||
- added: files actively added in the changeset. | - added: files actively added in the changeset. | ||||
- merged: files whose history got merged | - merged: files whose history got merged | ||||
- removed: files removed in the revision | - removed: files removed in the revision | ||||
- salvaged: files that might have been deleted by a merge but were not | - salvaged: files that might have been deleted by a merge but were not |
a convenience method to return an empty list instead of None | a convenience method to return an empty list instead of None | ||||
""" | """ | ||||
if val is None: | if val is None: | ||||
return [] | return [] | ||||
else: | else: | ||||
return [val] | return [val] | ||||
class namespaces(object): | class namespaces: | ||||
"""provides an interface to register and operate on multiple namespaces. See | """provides an interface to register and operate on multiple namespaces. See | ||||
the namespace class below for details on the namespace object. | the namespace class below for details on the namespace object. | ||||
""" | """ | ||||
_names_version = 0 | _names_version = 0 | ||||
def __init__(self): | def __init__(self): | ||||
""" | """ | ||||
for ns, v in self._names.items(): | for ns, v in self._names.items(): | ||||
n = v.singlenode(repo, name) | n = v.singlenode(repo, name) | ||||
if n: | if n: | ||||
return n | return n | ||||
raise KeyError(_(b'no such name: %s') % name) | raise KeyError(_(b'no such name: %s') % name) | ||||
class namespace(object): | class namespace: | ||||
"""provides an interface to a namespace | """provides an interface to a namespace | ||||
Namespaces are basically generic many-to-many mapping between some | Namespaces are basically generic many-to-many mapping between some | ||||
(namespaced) names and nodes. The goal here is to control the pollution of | (namespaced) names and nodes. The goal here is to control the pollution of | ||||
jamming things into tags or bookmarks (in extension-land) and to simplify | jamming things into tags or bookmarks (in extension-land) and to simplify | ||||
internal bits of mercurial: log output, tab completion, etc. | internal bits of mercurial: log output, tab completion, etc. | ||||
More precisely, we define a mapping of names to nodes, and a mapping from | More precisely, we define a mapping of names to nodes, and a mapping from |
nullrev = -1 | nullrev = -1 | ||||
# pseudo identifier for working directory | # pseudo identifier for working directory | ||||
# (experimental, so don't add too many dependencies on it) | # (experimental, so don't add too many dependencies on it) | ||||
wdirrev = 0x7FFFFFFF | wdirrev = 0x7FFFFFFF | ||||
class sha1nodeconstants(object): | class sha1nodeconstants: | ||||
nodelen = 20 | nodelen = 20 | ||||
# In hex, this is '0000000000000000000000000000000000000000' | # In hex, this is '0000000000000000000000000000000000000000' | ||||
nullid = b"\0" * nodelen | nullid = b"\0" * nodelen | ||||
nullhex = hex(nullid) | nullhex = hex(nullid) | ||||
# Phony node value to stand-in for new files in some uses of | # Phony node value to stand-in for new files in some uses of | ||||
# manifests. | # manifests. |
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b'bad obsolescence marker detected: ' | b'bad obsolescence marker detected: ' | ||||
b'invalid successors nullid' | b'invalid successors nullid' | ||||
) | ) | ||||
) | ) | ||||
class obsstore(object): | class obsstore: | ||||
"""Store obsolete markers | """Store obsolete markers | ||||
Markers can be accessed with two mappings: | Markers can be accessed with two mappings: | ||||
- predecessors[x] -> set(markers on predecessors edges of x) | - predecessors[x] -> set(markers on predecessors edges of x) | ||||
- successors[x] -> set(markers on successors edges of x) | - successors[x] -> set(markers on successors edges of x) | ||||
- children[x] -> set(markers on predecessors edges of children(x) | - children[x] -> set(markers on predecessors edges of children(x) | ||||
""" | """ | ||||
# as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>. | # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>. | ||||
# This flag mean that the successors express the changes between the public and | # This flag mean that the successors express the changes between the public and | ||||
# bumped version and fix the situation, breaking the transitivity of | # bumped version and fix the situation, breaking the transitivity of | ||||
# "bumped" here. | # "bumped" here. | ||||
bumpedfix = 1 | bumpedfix = 1 | ||||
usingsha256 = 2 | usingsha256 = 2 | ||||
class marker(object): | class marker: | ||||
"""Wrap obsolete marker raw data""" | """Wrap obsolete marker raw data""" | ||||
def __init__(self, repo, data): | def __init__(self, repo, data): | ||||
# the repo argument will be used to create changectx in later version | # the repo argument will be used to create changectx in later version | ||||
self._repo = repo | self._repo = repo | ||||
self._data = data | self._data = data | ||||
self._decodedmeta = None | self._decodedmeta = None | ||||
from .i18n import _ | from .i18n import _ | ||||
from . import ( | from . import ( | ||||
error, | error, | ||||
util, | util, | ||||
) | ) | ||||
from .utils import stringutil | from .utils import stringutil | ||||
class parser(object): | class parser: | ||||
def __init__(self, elements, methods=None): | def __init__(self, elements, methods=None): | ||||
self._elements = elements | self._elements = elements | ||||
self._methods = methods | self._methods = methods | ||||
self.current = None | self.current = None | ||||
def _advance(self): | def _advance(self): | ||||
"""advance the tokenizer""" | """advance the tokenizer""" | ||||
t = self.current | t = self.current | ||||
def parseerrordetail(inst): | def parseerrordetail(inst): | ||||
"""Compose error message from specified ParseError object""" | """Compose error message from specified ParseError object""" | ||||
if inst.location is not None: | if inst.location is not None: | ||||
return _(b'at %d: %s') % (inst.location, inst.message) | return _(b'at %d: %s') % (inst.location, inst.message) | ||||
else: | else: | ||||
return inst.message | return inst.message | ||||
class alias(object): | class alias: | ||||
"""Parsed result of alias""" | """Parsed result of alias""" | ||||
def __init__(self, name, args, err, replacement): | def __init__(self, name, args, err, replacement): | ||||
self.name = name | self.name = name | ||||
self.args = args | self.args = args | ||||
self.error = err | self.error = err | ||||
self.replacement = replacement | self.replacement = replacement | ||||
# whether own `error` information is already shown or not. | # whether own `error` information is already shown or not. | ||||
# this avoids showing same warning multiple times at each | # this avoids showing same warning multiple times at each | ||||
# `expandaliases`. | # `expandaliases`. | ||||
self.warned = False | self.warned = False | ||||
class basealiasrules(object): | class basealiasrules: | ||||
"""Parsing and expansion rule set of aliases | """Parsing and expansion rule set of aliases | ||||
This is a helper for fileset/revset/template aliases. A concrete rule set | This is a helper for fileset/revset/template aliases. A concrete rule set | ||||
should be made by sub-classing this and implementing class/static methods. | should be made by sub-classing this and implementing class/static methods. | ||||
It supports alias expansion of symbol and function-call styles:: | It supports alias expansion of symbol and function-call styles:: | ||||
# decl = defn | # decl = defn |
cur.append(line) | cur.append(line) | ||||
if cur: | if cur: | ||||
yield chunk(cur) | yield chunk(cur) | ||||
def remainder(cur): | def remainder(cur): | ||||
yield chunk(cur) | yield chunk(cur) | ||||
class fiter(object): | class fiter: | ||||
def __init__(self, fp): | def __init__(self, fp): | ||||
self.fp = fp | self.fp = fp | ||||
def __iter__(self): | def __iter__(self): | ||||
return self | return self | ||||
def next(self): | def next(self): | ||||
l = self.fp.readline() | l = self.fp.readline() | ||||
data[b'p2'] = parents.pop(0) | data[b'p2'] = parents.pop(0) | ||||
if diffs_seen: | if diffs_seen: | ||||
data[b'filename'] = tmpname | data[b'filename'] = tmpname | ||||
return data | return data | ||||
class patchmeta(object): | class patchmeta: | ||||
"""Patched file metadata | """Patched file metadata | ||||
'op' is the performed operation within ADD, DELETE, RENAME, MODIFY | 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY | ||||
or COPY. 'path' is patched file path. 'oldpath' is set to the | or COPY. 'path' is patched file path. 'oldpath' is set to the | ||||
origin file when 'op' is either COPY or RENAME, None otherwise. If | origin file when 'op' is either COPY or RENAME, None otherwise. If | ||||
file mode is changed, 'mode' is a tuple (islink, isexec) where | file mode is changed, 'mode' is a tuple (islink, isexec) where | ||||
'islink' is True if the file is a symlink and 'isexec' is True if | 'islink' is True if the file is a symlink and 'isexec' is True if | ||||
the file is executable. Otherwise, 'mode' is None. | the file is executable. Otherwise, 'mode' is None. | ||||
elif line.startswith(b'GIT binary patch'): | elif line.startswith(b'GIT binary patch'): | ||||
gp.binary = True | gp.binary = True | ||||
if gp: | if gp: | ||||
gitpatches.append(gp) | gitpatches.append(gp) | ||||
return gitpatches | return gitpatches | ||||
class linereader(object): | class linereader: | ||||
# simple class to allow pushing lines back into the input stream | # simple class to allow pushing lines back into the input stream | ||||
def __init__(self, fp): | def __init__(self, fp): | ||||
self.fp = fp | self.fp = fp | ||||
self.buf = [] | self.buf = [] | ||||
def push(self, line): | def push(self, line): | ||||
if line is not None: | if line is not None: | ||||
self.buf.append(line) | self.buf.append(line) | ||||
def readline(self): | def readline(self): | ||||
if self.buf: | if self.buf: | ||||
l = self.buf[0] | l = self.buf[0] | ||||
del self.buf[0] | del self.buf[0] | ||||
return l | return l | ||||
return self.fp.readline() | return self.fp.readline() | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(self.readline, b'') | return iter(self.readline, b'') | ||||
class abstractbackend(object): | class abstractbackend: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
def getfile(self, fname): | def getfile(self, fname): | ||||
"""Return target file data and flags as a (data, (islink, | """Return target file data and flags as a (data, (islink, | ||||
isexec)) tuple. Data is None if file is missing/deleted. | isexec)) tuple. Data is None if file is missing/deleted. | ||||
""" | """ | ||||
raise NotImplementedError | raise NotImplementedError | ||||
# deleted, and should not be considered by | # deleted, and should not be considered by | ||||
# marktouched(). | # marktouched(). | ||||
changed.discard(f) | changed.discard(f) | ||||
if changed: | if changed: | ||||
scmutil.marktouched(self.repo, changed, self.similarity) | scmutil.marktouched(self.repo, changed, self.similarity) | ||||
return sorted(self.changed) | return sorted(self.changed) | ||||
class filestore(object): | class filestore: | ||||
def __init__(self, maxsize=None): | def __init__(self, maxsize=None): | ||||
self.opener = None | self.opener = None | ||||
self.files = {} | self.files = {} | ||||
self.created = 0 | self.created = 0 | ||||
self.maxsize = maxsize | self.maxsize = maxsize | ||||
if self.maxsize is None: | if self.maxsize is None: | ||||
self.maxsize = 4 * (2 ** 20) | self.maxsize = 4 * (2 ** 20) | ||||
self.size = 0 | self.size = 0 | ||||
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 | # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 | ||||
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') | unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') | ||||
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') | contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') | ||||
eolmodes = [b'strict', b'crlf', b'lf', b'auto'] | eolmodes = [b'strict', b'crlf', b'lf', b'auto'] | ||||
class patchfile(object): | class patchfile: | ||||
def __init__(self, ui, gp, backend, store, eolmode=b'strict'): | def __init__(self, ui, gp, backend, store, eolmode=b'strict'): | ||||
self.fname = gp.path | self.fname = gp.path | ||||
self.eolmode = eolmode | self.eolmode = eolmode | ||||
self.eol = None | self.eol = None | ||||
self.backend = backend | self.backend = backend | ||||
self.ui = ui | self.ui = ui | ||||
self.lines = [] | self.lines = [] | ||||
self.exists = False | self.exists = False | ||||
def close(self): | def close(self): | ||||
if self.dirty: | if self.dirty: | ||||
self.writelines(self.fname, self.lines, self.mode) | self.writelines(self.fname, self.lines, self.mode) | ||||
self.write_rej() | self.write_rej() | ||||
return len(self.rej) | return len(self.rej) | ||||
class header(object): | class header: | ||||
"""patch header""" | """patch header""" | ||||
diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$') | diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$') | ||||
diff_re = re.compile(b'diff -r .* (.*)$') | diff_re = re.compile(b'diff -r .* (.*)$') | ||||
allhunks_re = re.compile(b'(?:index|deleted file) ') | allhunks_re = re.compile(b'(?:index|deleted file) ') | ||||
pretty_re = re.compile(b'(?:new file|deleted file) ') | pretty_re = re.compile(b'(?:new file|deleted file) ') | ||||
special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ') | special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ') | ||||
newfile_re = re.compile(b'(?:new file|copy to|rename to)') | newfile_re = re.compile(b'(?:new file|copy to|rename to)') | ||||
# if they have some content as we want to be able to change it | # if they have some content as we want to be able to change it | ||||
nocontent = len(self.header) == 2 | nocontent = len(self.header) == 2 | ||||
emptynewfile = self.isnewfile() and nocontent | emptynewfile = self.isnewfile() and nocontent | ||||
return emptynewfile or any( | return emptynewfile or any( | ||||
self.special_re.match(h) for h in self.header | self.special_re.match(h) for h in self.header | ||||
) | ) | ||||
class recordhunk(object): | class recordhunk: | ||||
"""patch hunk | """patch hunk | ||||
XXX shouldn't we merge this with the other hunk class? | XXX shouldn't we merge this with the other hunk class? | ||||
""" | """ | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
header, | header, | ||||
sum( | sum( | ||||
[h for h in applied.values() if h[0].special() or len(h) > 1], | [h for h in applied.values() if h[0].special() or len(h) > 1], | ||||
[], | [], | ||||
), | ), | ||||
{}, | {}, | ||||
) | ) | ||||
class hunk(object): | class hunk: | ||||
def __init__(self, desc, num, lr, context): | def __init__(self, desc, num, lr, context): | ||||
self.number = num | self.number = num | ||||
self.desc = desc | self.desc = desc | ||||
self.hunk = [desc] | self.hunk = [desc] | ||||
self.a = [] | self.a = [] | ||||
self.b = [] | self.b = [] | ||||
self.starta = self.lena = None | self.starta = self.lena = None | ||||
self.startb = self.lenb = None | self.startb = self.lenb = None | ||||
# zero length hunk ranges already have their start decremented | # zero length hunk ranges already have their start decremented | ||||
if self.lena and oldstart > 0: | if self.lena and oldstart > 0: | ||||
oldstart -= 1 | oldstart -= 1 | ||||
if self.lenb and newstart > 0: | if self.lenb and newstart > 0: | ||||
newstart -= 1 | newstart -= 1 | ||||
return old, oldstart, new, newstart | return old, oldstart, new, newstart | ||||
class binhunk(object): | class binhunk: | ||||
"""A binary patch file.""" | """A binary patch file.""" | ||||
def __init__(self, lr, fname): | def __init__(self, lr, fname): | ||||
self.text = None | self.text = None | ||||
self.delta = False | self.delta = False | ||||
self.hunk = [b'GIT binary patch\n'] | self.hunk = [b'GIT binary patch\n'] | ||||
self._fname = fname | self._fname = fname | ||||
self._read(lr) | self._read(lr) | ||||
+6.1 | +6.1 | ||||
+6.2 | +6.2 | ||||
7 | 7 | ||||
@@ -8,1 +9,2 @@ | @@ -8,1 +9,2 @@ | ||||
8 | 8 | ||||
+9 | +9 | ||||
""" | """ | ||||
class parser(object): | class parser: | ||||
"""patch parsing state machine""" | """patch parsing state machine""" | ||||
def __init__(self): | def __init__(self): | ||||
self.fromline = 0 | self.fromline = 0 | ||||
self.toline = 0 | self.toline = 0 | ||||
self.proc = b'' | self.proc = b'' | ||||
self.header = None | self.header = None | ||||
self.context = [] | self.context = [] |
parsers = policy.importmod('parsers') | parsers = policy.importmod('parsers') | ||||
def _lowerclean(s): | def _lowerclean(s): | ||||
# type: (bytes) -> bytes | # type: (bytes) -> bytes | ||||
return encoding.hfsignoreclean(s.lower()) | return encoding.hfsignoreclean(s.lower()) | ||||
class pathauditor(object): | class pathauditor: | ||||
"""ensure that a filesystem path contains no banned components. | """ensure that a filesystem path contains no banned components. | ||||
the following properties of a path are checked: | the following properties of a path are checked: | ||||
- ends with a directory separator | - ends with a directory separator | ||||
- under top-level .hg | - under top-level .hg | ||||
- starts at the root of a windows drive | - starts at the root of a windows drive | ||||
- contains ".." | - contains ".." | ||||
# type: (bytes) -> Iterator[bytes] | # type: (bytes) -> Iterator[bytes] | ||||
pos = path.rfind(b'/') | pos = path.rfind(b'/') | ||||
while pos != -1: | while pos != -1: | ||||
yield path[:pos] | yield path[:pos] | ||||
pos = path.rfind(b'/', 0, pos) | pos = path.rfind(b'/', 0, pos) | ||||
yield b'' | yield b'' | ||||
class dirs(object): | class dirs: | ||||
'''a multiset of directory names from a set of file paths''' | '''a multiset of directory names from a set of file paths''' | ||||
def __init__(self, map, only_tracked=False): | def __init__(self, map, only_tracked=False): | ||||
""" | """ | ||||
a dict map indicates a dirstate while a list indicates a manifest | a dict map indicates a dirstate while a list indicates a manifest | ||||
""" | """ | ||||
self._dirs = {} | self._dirs = {} | ||||
addpath = self.addpath | addpath = self.addpath |
r1, t1 = data[low] | r1, t1 = data[low] | ||||
if r1[0] > rev: | if r1[0] > rev: | ||||
data.insert(low, (pycompat.xrange(rev, rev + 1), t)) | data.insert(low, (pycompat.xrange(rev, rev + 1), t)) | ||||
else: | else: | ||||
data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t)) | data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t)) | ||||
class phasecache(object): | class phasecache: | ||||
def __init__(self, repo, phasedefaults, _load=True): | def __init__(self, repo, phasedefaults, _load=True): | ||||
# type: (localrepo.localrepository, Optional[Phasedefaults], bool) -> None | # type: (localrepo.localrepository, Optional[Phasedefaults], bool) -> None | ||||
if _load: | if _load: | ||||
# Cheap trick to allow shallow-copy without copy module | # Cheap trick to allow shallow-copy without copy module | ||||
self.phaseroots, self.dirty = _readroots(repo, phasedefaults) | self.phaseroots, self.dirty = _readroots(repo, phasedefaults) | ||||
self._loadedrevslen = 0 | self._loadedrevslen = 0 | ||||
self._phasesets = None | self._phasesets = None | ||||
self.filterunknown(repo) | self.filterunknown(repo) | ||||
_(b'ignoring unexpected root from remote: %i %s\n') | _(b'ignoring unexpected root from remote: %i %s\n') | ||||
% (phase, nhex) | % (phase, nhex) | ||||
) | ) | ||||
# compute heads | # compute heads | ||||
publicheads = newheads(repo, subset, draftroots) | publicheads = newheads(repo, subset, draftroots) | ||||
return publicheads, draftroots | return publicheads, draftroots | ||||
class remotephasessummary(object): | class remotephasessummary: | ||||
"""summarize phase information on the remote side | """summarize phase information on the remote side | ||||
:publishing: True is the remote is publishing | :publishing: True is the remote is publishing | ||||
:publicheads: list of remote public phase heads (nodes) | :publicheads: list of remote public phase heads (nodes) | ||||
:draftheads: list of remote draft phase heads (nodes) | :draftheads: list of remote draft phase heads (nodes) | ||||
:draftroots: list of remote draft phase root (nodes) | :draftroots: list of remote draft phase root (nodes) | ||||
""" | """ | ||||
"""Hide current shell window. | """Hide current shell window. | ||||
Used to hide the window opened when starting asynchronous | Used to hide the window opened when starting asynchronous | ||||
child process under Windows, unneeded on other systems. | child process under Windows, unneeded on other systems. | ||||
""" | """ | ||||
pass | pass | ||||
class cachestat(object): | class cachestat: | ||||
def __init__(self, path): | def __init__(self, path): | ||||
self.stat = os.stat(path) | self.stat = os.stat(path) | ||||
def cacheable(self): | def cacheable(self): | ||||
return bool(self.stat.st_ino) | return bool(self.stat.st_ino) | ||||
__hash__ = object.__hash__ | __hash__ = object.__hash__ | ||||
limit = ui.configwith(fraction, b'profiling', b'showmin', 0.05) | limit = ui.configwith(fraction, b'profiling', b'showmin', 0.05) | ||||
kwargs['limit'] = limit | kwargs['limit'] = limit | ||||
showtime = ui.configbool(b'profiling', b'showtime') | showtime = ui.configbool(b'profiling', b'showtime') | ||||
kwargs['showtime'] = showtime | kwargs['showtime'] = showtime | ||||
statprof.display(fp, data=data, format=displayformat, **kwargs) | statprof.display(fp, data=data, format=displayformat, **kwargs) | ||||
class profile(object): | class profile: | ||||
"""Start profiling. | """Start profiling. | ||||
Profiling is active when the context manager is active. When the context | Profiling is active when the context manager is active. When the context | ||||
manager exits, profiling results will be written to the configured output. | manager exits, profiling results will be written to the configured output. | ||||
""" | """ | ||||
def __init__(self, ui, enabled=True): | def __init__(self, ui, enabled=True): | ||||
self._ui = ui | self._ui = ui | ||||
try: | try: | ||||
if self._output == b'blackbox': | if self._output == b'blackbox': | ||||
self._fp = util.stringio() | self._fp = util.stringio() | ||||
elif self._output: | elif self._output: | ||||
path = util.expandpath(self._output) | path = util.expandpath(self._output) | ||||
self._fp = open(path, b'wb') | self._fp = open(path, b'wb') | ||||
elif pycompat.iswindows: | elif pycompat.iswindows: | ||||
# parse escape sequence by win32print() | # parse escape sequence by win32print() | ||||
class uifp(object): | class uifp: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self._ui = ui | self._ui = ui | ||||
def write(self, data): | def write(self, data): | ||||
self._ui.write_err(data) | self._ui.write_err(data) | ||||
def flush(self): | def flush(self): | ||||
self._ui.flush() | self._ui.flush() |
try: | try: | ||||
return func(*args) | return func(*args) | ||||
except IOError as err: | except IOError as err: | ||||
if err.errno == errno.EINTR: | if err.errno == errno.EINTR: | ||||
continue | continue | ||||
raise | raise | ||||
class progbar(object): | class progbar: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
self._refreshlock = threading.Lock() | self._refreshlock = threading.Lock() | ||||
self.resetstate() | self.resetstate() | ||||
def resetstate(self): | def resetstate(self): | ||||
self.topics = [] | self.topics = [] | ||||
self.topicstates = {} | self.topicstates = {} |
_kernel32.CreateFileA.restype = _HANDLE | _kernel32.CreateFileA.restype = _HANDLE | ||||
def _raiseioerror(name): | def _raiseioerror(name): | ||||
err = ctypes.WinError() # pytype: disable=module-attr | err = ctypes.WinError() # pytype: disable=module-attr | ||||
raise IOError( | raise IOError( | ||||
err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | ||||
) | ) | ||||
class posixfile(object): | class posixfile: | ||||
"""a file object aiming for POSIX-like semantics | """a file object aiming for POSIX-like semantics | ||||
CPython's open() returns a file that was opened *without* setting the | CPython's open() returns a file that was opened *without* setting the | ||||
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort. | _FILE_SHARE_DELETE flag, which causes rename and unlink to abort. | ||||
This even happens if any hardlinked copy of the file is in open state. | This even happens if any hardlinked copy of the file is in open state. | ||||
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be | We set _FILE_SHARE_DELETE here, so files opened with posixfile can be | ||||
renamed and deleted while they are held open. | renamed and deleted while they are held open. | ||||
Note that if a file opened with posixfile is unlinked, the file | Note that if a file opened with posixfile is unlinked, the file |
DIRSTATE_V2_HAS_MTIME = 1 << 11 | DIRSTATE_V2_HAS_MTIME = 1 << 11 | ||||
DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12 | DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12 | ||||
DIRSTATE_V2_DIRECTORY = 1 << 13 | DIRSTATE_V2_DIRECTORY = 1 << 13 | ||||
DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14 | DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14 | ||||
DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15 | DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15 | ||||
@attr.s(slots=True, init=False) | @attr.s(slots=True, init=False) | ||||
class DirstateItem(object): | class DirstateItem: | ||||
"""represent a dirstate entry | """represent a dirstate entry | ||||
It hold multiple attributes | It hold multiple attributes | ||||
# about file tracking | # about file tracking | ||||
- wc_tracked: is the file tracked by the working copy | - wc_tracked: is the file tracked by the working copy | ||||
- p1_tracked: is the file tracked in working copy first parent | - p1_tracked: is the file tracked in working copy first parent | ||||
- p2_info: the file has been involved in some merge operation. Either | - p2_info: the file has been involved in some merge operation. Either | ||||
else: | else: | ||||
return self._mtime_s | return self._mtime_s | ||||
def gettype(q): | def gettype(q): | ||||
return int(q & 0xFFFF) | return int(q & 0xFFFF) | ||||
class BaseIndexObject(object): | class BaseIndexObject: | ||||
# Can I be passed to an algorithme implemented in Rust ? | # Can I be passed to an algorithme implemented in Rust ? | ||||
rust_ext_compat = 0 | rust_ext_compat = 0 | ||||
# Format of an index entry according to Python's `struct` language | # Format of an index entry according to Python's `struct` language | ||||
index_format = revlog_constants.INDEX_ENTRY_V1 | index_format = revlog_constants.INDEX_ENTRY_V1 | ||||
# Size of a C unsigned long long int, platform independent | # Size of a C unsigned long long int, platform independent | ||||
big_int_size = struct.calcsize(b'>Q') | big_int_size = struct.calcsize(b'>Q') | ||||
# Size of a C long int, platform independent | # Size of a C long int, platform independent | ||||
int_size = struct.calcsize(b'>i') | int_size = struct.calcsize(b'>i') |
d, v = pvc[p1] | d, v = pvc[p1] | ||||
pvc[n] = (d + 1, _flipbit(v, node)) | pvc[n] = (d + 1, _flipbit(v, node)) | ||||
else: | else: | ||||
pvc[n] = _mergevec(pvc[p1], pvc[p2], node) | pvc[n] = _mergevec(pvc[p1], pvc[p2], node) | ||||
bs = _join(*pvc[ctx.rev()]) | bs = _join(*pvc[ctx.rev()]) | ||||
return pvec(util.b85encode(bs)) | return pvec(util.b85encode(bs)) | ||||
class pvec(object): | class pvec: | ||||
def __init__(self, hashorctx): | def __init__(self, hashorctx): | ||||
if isinstance(hashorctx, bytes): | if isinstance(hashorctx, bytes): | ||||
self._bs = hashorctx | self._bs = hashorctx | ||||
self._depth, self._vec = _split(util.b85decode(hashorctx)) | self._depth, self._vec = _split(util.b85decode(hashorctx)) | ||||
else: | else: | ||||
self._vec = ctxpvec(hashorctx) | self._vec = ctxpvec(hashorctx) | ||||
def __str__(self): | def __str__(self): |
>>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1) | >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1) | ||||
('', 'foo', 'ascii', '1') | ('', 'foo', 'ascii', '1') | ||||
>>> s = bytestr(b'foo') | >>> s = bytestr(b'foo') | ||||
>>> assert s is bytestr(s) | >>> assert s is bytestr(s) | ||||
__bytes__() should be called if provided: | __bytes__() should be called if provided: | ||||
>>> class bytesable(object): | >>> class bytesable: | ||||
... def __bytes__(self): | ... def __bytes__(self): | ||||
... return b'bytes' | ... return b'bytes' | ||||
>>> bytestr(bytesable()) | >>> bytestr(bytesable()) | ||||
'bytes' | 'bytes' | ||||
There's no implicit conversion from non-ascii str as its encoding is | There's no implicit conversion from non-ascii str as its encoding is | ||||
unknown: | unknown: | ||||
# unlike the other registered items, config options are neither functions or | # unlike the other registered items, config options are neither functions or | ||||
# classes. Registering the option is just small function call. | # classes. Registering the option is just small function call. | ||||
# | # | ||||
# We still add the official API to the registrar module for consistency with | # We still add the official API to the registrar module for consistency with | ||||
# the other items extensions want might to register. | # the other items extensions want might to register. | ||||
configitem = configitems.getitemregister | configitem = configitems.getitemregister | ||||
class _funcregistrarbase(object): | class _funcregistrarbase: | ||||
"""Base of decorator to register a function for specific purpose | """Base of decorator to register a function for specific purpose | ||||
This decorator stores decorated functions into own dict 'table'. | This decorator stores decorated functions into own dict 'table'. | ||||
The least derived class can be defined by overriding 'formatdoc', | The least derived class can be defined by overriding 'formatdoc', | ||||
for example:: | for example:: | ||||
class keyword(_funcregistrarbase): | class keyword(_funcregistrarbase): |
nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) | nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip)) | ||||
ui.warn( | ui.warn( | ||||
_(b'warning: orphaned descendants detected, not stripping %s\n') | _(b'warning: orphaned descendants detected, not stripping %s\n') | ||||
% nodestr | % nodestr | ||||
) | ) | ||||
return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] | return [c.node() for c in repo.set(b'roots(%ld)', tostrip)] | ||||
class stripcallback(object): | class stripcallback: | ||||
"""used as a transaction postclose callback""" | """used as a transaction postclose callback""" | ||||
def __init__(self, ui, repo, backup, topic): | def __init__(self, ui, repo, backup, topic): | ||||
self.ui = ui | self.ui = ui | ||||
self.repo = repo | self.repo = repo | ||||
self.backup = backup | self.backup = backup | ||||
self.topic = topic or b'backup' | self.topic = topic or b'backup' | ||||
self.nodelist = [] | self.nodelist = [] |
error, | error, | ||||
hg, | hg, | ||||
obsolete, | obsolete, | ||||
scmutil, | scmutil, | ||||
util, | util, | ||||
) | ) | ||||
class repoloader(object): | class repoloader: | ||||
"""Load repositories in background thread | """Load repositories in background thread | ||||
This is designed for a forking server. A cached repo cannot be obtained | This is designed for a forking server. A cached repo cannot be obtained | ||||
until the server fork()s a worker and the loader thread stops. | until the server fork()s a worker and the loader thread stops. | ||||
""" | """ | ||||
def __init__(self, ui, maxlen): | def __init__(self, ui, maxlen): | ||||
self._ui = ui.copy() | self._ui = ui.copy() |
class filteredchangelog(filteredchangelogmixin, cl.__class__): | class filteredchangelog(filteredchangelogmixin, cl.__class__): | ||||
pass | pass | ||||
cl.__class__ = filteredchangelog | cl.__class__ = filteredchangelog | ||||
return cl | return cl | ||||
class filteredchangelogmixin(object): | class filteredchangelogmixin: | ||||
def tiprev(self): | def tiprev(self): | ||||
"""filtered version of revlog.tiprev""" | """filtered version of revlog.tiprev""" | ||||
for i in pycompat.xrange(len(self) - 1, -2, -1): | for i in pycompat.xrange(len(self) - 1, -2, -1): | ||||
if i not in self.filteredrevs: | if i not in self.filteredrevs: | ||||
return i | return i | ||||
def __contains__(self, rev): | def __contains__(self, rev): | ||||
"""filtered version of revlog.__contains__""" | """filtered version of revlog.__contains__""" | ||||
def flags(self, rev): | def flags(self, rev): | ||||
"""filtered version of revlog.flags""" | """filtered version of revlog.flags""" | ||||
if rev in self.filteredrevs: | if rev in self.filteredrevs: | ||||
raise error.FilteredIndexError(rev) | raise error.FilteredIndexError(rev) | ||||
return super(filteredchangelogmixin, self).flags(rev) | return super(filteredchangelogmixin, self).flags(rev) | ||||
class repoview(object): | class repoview: | ||||
"""Provide a read/write view of a repo through a filtered changelog | """Provide a read/write view of a repo through a filtered changelog | ||||
This object is used to access a filtered version of a repository without | This object is used to access a filtered version of a repository without | ||||
altering the original repository object itself. We can not alter the | altering the original repository object itself. We can not alter the | ||||
original object for two main reasons: | original object for two main reasons: | ||||
- It prevents the use of a repo with multiple filters at the same time. In | - It prevents the use of a repo with multiple filters at the same time. In | ||||
particular when multiple threads are involved. | particular when multiple threads are involved. | ||||
- It makes scope of the filtering harder to control. | - It makes scope of the filtering harder to control. |
# wheelbarrow of other slowness source) | # wheelbarrow of other slowness source) | ||||
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr( | HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr( | ||||
parsers, 'BaseIndexObject' | parsers, 'BaseIndexObject' | ||||
) | ) | ||||
@interfaceutil.implementer(repository.irevisiondelta) | @interfaceutil.implementer(repository.irevisiondelta) | ||||
@attr.s(slots=True) | @attr.s(slots=True) | ||||
class revlogrevisiondelta(object): | class revlogrevisiondelta: | ||||
node = attr.ib() | node = attr.ib() | ||||
p1node = attr.ib() | p1node = attr.ib() | ||||
p2node = attr.ib() | p2node = attr.ib() | ||||
basenode = attr.ib() | basenode = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
baserevisionsize = attr.ib() | baserevisionsize = attr.ib() | ||||
revision = attr.ib() | revision = attr.ib() | ||||
delta = attr.ib() | delta = attr.ib() | ||||
sidedata = attr.ib() | sidedata = attr.ib() | ||||
protocol_flags = attr.ib() | protocol_flags = attr.ib() | ||||
linknode = attr.ib(default=None) | linknode = attr.ib(default=None) | ||||
@interfaceutil.implementer(repository.iverifyproblem) | @interfaceutil.implementer(repository.iverifyproblem) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class revlogproblem(object): | class revlogproblem: | ||||
warning = attr.ib(default=None) | warning = attr.ib(default=None) | ||||
error = attr.ib(default=None) | error = attr.ib(default=None) | ||||
node = attr.ib(default=None) | node = attr.ib(default=None) | ||||
def parse_index_v1(data, inline): | def parse_index_v1(data, inline): | ||||
# call the C implementation to parse the index data | # call the C implementation to parse the index data | ||||
index, cache = parsers.parse_index2(data, inline) | index, cache = parsers.parse_index2(data, inline) | ||||
_maxentrysize = 0x7FFFFFFF | _maxentrysize = 0x7FFFFFFF | ||||
FILE_TOO_SHORT_MSG = _( | FILE_TOO_SHORT_MSG = _( | ||||
b'cannot read from revlog %s;' | b'cannot read from revlog %s;' | ||||
b' expected %d bytes from offset %d, data size is %d' | b' expected %d bytes from offset %d, data size is %d' | ||||
) | ) | ||||
class revlog(object): | class revlog: | ||||
""" | """ | ||||
the underlying revision storage object | the underlying revision storage object | ||||
A revlog consists of two parts, an index and the revision data. | A revlog consists of two parts, an index and the revision data. | ||||
The index is a file with a fixed record size containing | The index is a file with a fixed record size containing | ||||
information on each revision, including its nodeid (hash), the | information on each revision, including its nodeid (hash), the | ||||
nodeids of its parents, the position and offset of its data within | nodeids of its parents, the position and offset of its data within | ||||
common = [self.nullid] | common = [self.nullid] | ||||
if heads is None: | if heads is None: | ||||
heads = self.heads() | heads = self.heads() | ||||
common = [self.rev(n) for n in common] | common = [self.rev(n) for n in common] | ||||
heads = [self.rev(n) for n in heads] | heads = [self.rev(n) for n in heads] | ||||
# we want the ancestors, but inclusive | # we want the ancestors, but inclusive | ||||
class lazyset(object): | class lazyset: | ||||
def __init__(self, lazyvalues): | def __init__(self, lazyvalues): | ||||
self.addedvalues = set() | self.addedvalues = set() | ||||
self.lazyvalues = lazyvalues | self.lazyvalues = lazyvalues | ||||
def __contains__(self, value): | def __contains__(self, value): | ||||
return value in self.addedvalues or value in self.lazyvalues | return value in self.addedvalues or value in self.lazyvalues | ||||
def __iter__(self): | def __iter__(self): |
sidedata_compressed_length, | sidedata_compressed_length, | ||||
data_compression_mode, | data_compression_mode, | ||||
sidedata_compression_mode, | sidedata_compression_mode, | ||||
rank, | rank, | ||||
) | ) | ||||
@attr.s(slots=True, frozen=True) | @attr.s(slots=True, frozen=True) | ||||
class revisioninfo(object): | class revisioninfo: | ||||
"""Information about a revision that allows building its fulltext | """Information about a revision that allows building its fulltext | ||||
node: expected hash of the revision | node: expected hash of the revision | ||||
p1, p2: parent revs of the revision | p1, p2: parent revs of the revision | ||||
btext: built text cache consisting of a one-element list | btext: built text cache consisting of a one-element list | ||||
cachedelta: (baserev, uncompressed_delta) or None | cachedelta: (baserev, uncompressed_delta) or None | ||||
flags: flags associated to the revision storage | flags: flags associated to the revision storage | ||||
One of btext[0] or cachedelta must be set. | One of btext[0] or cachedelta must be set. |
) | ) | ||||
from . import flagutil | from . import flagutil | ||||
# maximum <delta-chain-data>/<revision-text-length> ratio | # maximum <delta-chain-data>/<revision-text-length> ratio | ||||
LIMIT_DELTA2TEXT = 2 | LIMIT_DELTA2TEXT = 2 | ||||
class _testrevlog(object): | class _testrevlog: | ||||
"""minimalist fake revlog to use in doctests""" | """minimalist fake revlog to use in doctests""" | ||||
def __init__(self, data, density=0.5, mingap=0, snapshot=()): | def __init__(self, data, density=0.5, mingap=0, snapshot=()): | ||||
"""data is an list of revision payload boundaries""" | """data is an list of revision payload boundaries""" | ||||
self._data = data | self._data = data | ||||
self._srdensitythreshold = density | self._srdensitythreshold = density | ||||
self._srmingapsize = mingap | self._srmingapsize = mingap | ||||
self._snapshot = set(snapshot) | self._snapshot = set(snapshot) | ||||
except error.CensoredNodeError: | except error.CensoredNodeError: | ||||
# must pass the censored index flag to add censored revisions | # must pass the censored index flag to add censored revisions | ||||
if not flags & REVIDX_ISCENSORED: | if not flags & REVIDX_ISCENSORED: | ||||
raise | raise | ||||
return fulltext | return fulltext | ||||
@attr.s(slots=True, frozen=True)
class _deltainfo:
    """Immutable record describing a (candidate) delta and its cost.

    Exact field semantics are defined by the code that builds these
    records; the names suggest size/placement bookkeeping used when
    evaluating delta chains -- confirm against the producers.
    """

    distance = attr.ib()
    deltalen = attr.ib()
    data = attr.ib()
    base = attr.ib()
    chainbase = attr.ib()
    chainlen = attr.ib()
    compresseddeltalen = attr.ib()
    snapshotdepth = attr.ib()
yield tuple(snapshots[nullrev]) | yield tuple(snapshots[nullrev]) | ||||
if not sparse: | if not sparse: | ||||
# other approach failed try against prev to hopefully save us a | # other approach failed try against prev to hopefully save us a | ||||
# fulltext. | # fulltext. | ||||
yield (prev,) | yield (prev,) | ||||
class deltacomputer(object): | class deltacomputer: | ||||
def __init__(self, revlog): | def __init__(self, revlog): | ||||
self.revlog = revlog | self.revlog = revlog | ||||
def buildtext(self, revinfo, fh): | def buildtext(self, revinfo, fh): | ||||
"""Builds a fulltext version of a revision | """Builds a fulltext version of a revision | ||||
revinfo: revisioninfo instance that contains all needed info | revinfo: revisioninfo instance that contains all needed info | ||||
fh: file handle to either the .i or the .d revlog file, | fh: file handle to either the .i or the .d revlog file, |
# * 8 bytes: pending size of sidedata | # * 8 bytes: pending size of sidedata | ||||
# * 1 bytes: default compression header | # * 1 bytes: default compression header | ||||
S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc') | S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc') | ||||
# * 1 bytes: size of index uuid | # * 1 bytes: size of index uuid | ||||
# * 8 bytes: size of file | # * 8 bytes: size of file | ||||
S_OLD_UID = struct.Struct('>BL') | S_OLD_UID = struct.Struct('>BL') | ||||
class RevlogDocket(object): | class RevlogDocket: | ||||
"""metadata associated with revlog""" | """metadata associated with revlog""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
revlog, | revlog, | ||||
use_pending=False, | use_pending=False, | ||||
version_header=None, | version_header=None, | ||||
index_uuid=None, | index_uuid=None, |
if tr.hasfinalize(callback_id): | if tr.hasfinalize(callback_id): | ||||
return # no need to register again | return # no need to register again | ||||
tr.addpending( | tr.addpending( | ||||
callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True) | callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True) | ||||
) | ) | ||||
tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog)) | tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog)) | ||||
class _NoTransaction(object): | class _NoTransaction: | ||||
"""transaction like object to update the nodemap outside a transaction""" | """transaction like object to update the nodemap outside a transaction""" | ||||
def __init__(self): | def __init__(self): | ||||
self._postclose = {} | self._postclose = {} | ||||
def addpostclose(self, callback_id, callback_func): | def addpostclose(self, callback_id, callback_func): | ||||
self._postclose[callback_id] = callback_func | self._postclose[callback_id] = callback_func | ||||
# data. Its content is currently very light, but it will expand as the on disk | # data. Its content is currently very light, but it will expand as the on disk | ||||
# nodemap gains the necessary features to be used in production. | # nodemap gains the necessary features to be used in production. | ||||
ONDISK_VERSION = 1 | ONDISK_VERSION = 1 | ||||
S_VERSION = struct.Struct(">B") | S_VERSION = struct.Struct(">B") | ||||
S_HEADER = struct.Struct(">BQQQQ") | S_HEADER = struct.Struct(">BQQQQ") | ||||
class NodeMapDocket(object): | class NodeMapDocket: | ||||
"""metadata associated with persistent nodemap data | """metadata associated with persistent nodemap data | ||||
The persistent data may come from disk or be on their way to disk. | The persistent data may come from disk or be on their way to disk. | ||||
""" | """ | ||||
def __init__(self, uid=None): | def __init__(self, uid=None): | ||||
if uid is None: | if uid is None: | ||||
uid = docket_mod.make_uid() | uid = docket_mod.make_uid() |
b'partial read of revlog %s; expected %d bytes from offset %d, got %d' | b'partial read of revlog %s; expected %d bytes from offset %d, got %d' | ||||
) | ) | ||||
def _is_power_of_two(n): | def _is_power_of_two(n): | ||||
return (n & (n - 1) == 0) and n != 0 | return (n & (n - 1) == 0) and n != 0 | ||||
class randomaccessfile(object): | class randomaccessfile: | ||||
"""Accessing arbitrary chuncks of data within a file, with some caching""" | """Accessing arbitrary chuncks of data within a file, with some caching""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
opener, | opener, | ||||
filename, | filename, | ||||
default_cached_chunk_size, | default_cached_chunk_size, | ||||
initial_cache=None, | initial_cache=None, |
parsers = policy.importmod('parsers') | parsers = policy.importmod('parsers') | ||||
rustrevlog = policy.importrust('revlog') | rustrevlog = policy.importrust('revlog') | ||||
termsize = scmplatform.termsize | termsize = scmplatform.termsize | ||||
@attr.s(slots=True, repr=False)
class status:
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    # attr.Factory gives every instance its own fresh list (avoids the
    # shared-mutable-default pitfall).
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
warn = bval or lval == b'warn' | warn = bval or lval == b'warn' | ||||
if bval is None and not (warn or abort or lval == b'ignore'): | if bval is None and not (warn or abort or lval == b'ignore'): | ||||
raise error.ConfigError( | raise error.ConfigError( | ||||
_(b"ui.portablefilenames value is invalid ('%s')") % val | _(b"ui.portablefilenames value is invalid ('%s')") % val | ||||
) | ) | ||||
return abort, warn | return abort, warn | ||||
class casecollisionauditor(object): | class casecollisionauditor: | ||||
def __init__(self, ui, abort, dirstate): | def __init__(self, ui, abort, dirstate): | ||||
self._ui = ui | self._ui = ui | ||||
self._abort = abort | self._abort = abort | ||||
allfiles = b'\0'.join(dirstate) | allfiles = b'\0'.join(dirstate) | ||||
self._loweredfiles = set(encoding.lower(allfiles).split(b'\0')) | self._loweredfiles = set(encoding.lower(allfiles).split(b'\0')) | ||||
self._dirstate = dirstate | self._dirstate = dirstate | ||||
# The purpose of _newfiles is so that we don't complain about | # The purpose of _newfiles is so that we don't complain about | ||||
# case collisions if someone were to call this object with the | # case collisions if someone were to call this object with the | ||||
ui.note( | ui.note( | ||||
_(b'removing conflicting directory: %s\n') % origvfs.join(filepath) | _(b'removing conflicting directory: %s\n') % origvfs.join(filepath) | ||||
) | ) | ||||
origvfs.rmtree(filepath, forcibly=True) | origvfs.rmtree(filepath, forcibly=True) | ||||
return origvfs.join(filepath) | return origvfs.join(filepath) | ||||
class _containsnode(object): | class _containsnode: | ||||
"""proxy __contains__(node) to container.__contains__ which accepts revs""" | """proxy __contains__(node) to container.__contains__ which accepts revs""" | ||||
def __init__(self, repo, revcontainer): | def __init__(self, repo, revcontainer): | ||||
self._torev = repo.changelog.rev | self._torev = repo.changelog.rev | ||||
self._revcontains = revcontainer.__contains__ | self._revcontains = revcontainer.__contains__ | ||||
def __contains__(self, node): | def __contains__(self, node): | ||||
return self._revcontains(self._torev(node)) | return self._revcontains(self._torev(node)) | ||||
def writerequires(opener, requirements):
    """Persist ``requirements`` to the ``requires`` file, sorted, one per line.

    The file is written atomically via the opener's ``atomictemp`` support.
    """
    with opener(b'requires', b'w', atomictemp=True) as fp:
        fp.writelines(b"%s\n" % req for req in sorted(requirements))
class filecachesubentry(object): | class filecachesubentry: | ||||
def __init__(self, path, stat): | def __init__(self, path, stat): | ||||
self.path = path | self.path = path | ||||
self.cachestat = None | self.cachestat = None | ||||
self._cacheable = None | self._cacheable = None | ||||
if stat: | if stat: | ||||
self.cachestat = filecachesubentry.stat(self.path) | self.cachestat = filecachesubentry.stat(self.path) | ||||
def stat(path): | def stat(path): | ||||
try: | try: | ||||
return util.cachestat(path) | return util.cachestat(path) | ||||
except OSError as e: | except OSError as e: | ||||
if e.errno != errno.ENOENT: | if e.errno != errno.ENOENT: | ||||
raise | raise | ||||
class filecacheentry(object): | class filecacheentry: | ||||
def __init__(self, paths, stat=True): | def __init__(self, paths, stat=True): | ||||
self._entries = [] | self._entries = [] | ||||
for path in paths: | for path in paths: | ||||
self._entries.append(filecachesubentry(path, stat)) | self._entries.append(filecachesubentry(path, stat)) | ||||
def changed(self): | def changed(self): | ||||
'''true if any entry has changed''' | '''true if any entry has changed''' | ||||
for entry in self._entries: | for entry in self._entries: | ||||
if entry.changed(): | if entry.changed(): | ||||
return True | return True | ||||
return False | return False | ||||
def refresh(self): | def refresh(self): | ||||
for entry in self._entries: | for entry in self._entries: | ||||
entry.refresh() | entry.refresh() | ||||
class filecache(object): | class filecache: | ||||
"""A property like decorator that tracks files under .hg/ for updates. | """A property like decorator that tracks files under .hg/ for updates. | ||||
On first access, the files defined as arguments are stat()ed and the | On first access, the files defined as arguments are stat()ed and the | ||||
results cached. The decorated function is called. The results are stashed | results cached. The decorated function is called. The results are stashed | ||||
away in a ``_filecache`` dict on the object whose method is decorated. | away in a ``_filecache`` dict on the object whose method is decorated. | ||||
On subsequent access, the cached result is used as it is set to the | On subsequent access, the cached result is used as it is set to the | ||||
instance dictionary. | instance dictionary. | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"extdata command '%s' failed: %s") | _(b"extdata command '%s' failed: %s") | ||||
% (cmd, procutil.explainexit(proc.returncode)) | % (cmd, procutil.explainexit(proc.returncode)) | ||||
) | ) | ||||
return data | return data | ||||
class progress(object): | class progress: | ||||
def __init__(self, ui, updatebar, topic, unit=b"", total=None): | def __init__(self, ui, updatebar, topic, unit=b"", total=None): | ||||
self.ui = ui | self.ui = ui | ||||
self.pos = 0 | self.pos = 0 | ||||
self.topic = topic | self.topic = topic | ||||
self.unit = unit | self.unit = unit | ||||
self.total = total | self.total = total | ||||
self.debug = ui.configbool(b'progress', b'debug') | self.debug = ui.configbool(b'progress', b'debug') | ||||
self._updatebar = updatebar | self._updatebar = updatebar | ||||
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
class simplekeyvaluefile(object): | class simplekeyvaluefile: | ||||
"""A simple file with key=value lines | """A simple file with key=value lines | ||||
Keys must be alphanumerics and start with a letter, values must not | Keys must be alphanumerics and start with a letter, values must not | ||||
contain '\n' characters""" | contain '\n' characters""" | ||||
firstlinekey = b'__firstline' | firstlinekey = b'__firstline' | ||||
def __init__(self, vfs, path, keys=None): | def __init__(self, vfs, path, keys=None): |
return sample | return sample | ||||
if randomize: | if randomize: | ||||
return set(random.sample(sample, desiredlen)) | return set(random.sample(sample, desiredlen)) | ||||
sample = list(sample) | sample = list(sample) | ||||
sample.sort() | sample.sort() | ||||
return set(sample[:desiredlen]) | return set(sample[:desiredlen]) | ||||
class partialdiscovery(object): | class partialdiscovery: | ||||
"""an object representing ongoing discovery | """an object representing ongoing discovery | ||||
Feed with data from the remote repository, this object keep track of the | Feed with data from the remote repository, this object keep track of the | ||||
current set of changeset in various states: | current set of changeset in various states: | ||||
- common: revs also known remotely | - common: revs also known remotely | ||||
- undecided: revs we don't have information on yet | - undecided: revs we don't have information on yet | ||||
- missing: revs missing remotely | - missing: revs missing remotely |
shelvedir = b'shelved' | shelvedir = b'shelved' | ||||
shelvefileextensions = [b'hg', b'patch', b'shelve'] | shelvefileextensions = [b'hg', b'patch', b'shelve'] | ||||
# we never need the user, so we use a | # we never need the user, so we use a | ||||
# generic user for all shelve operations | # generic user for all shelve operations | ||||
shelveuser = b'shelve@localhost' | shelveuser = b'shelve@localhost' | ||||
class ShelfDir(object): | class ShelfDir: | ||||
def __init__(self, repo, for_backups=False): | def __init__(self, repo, for_backups=False): | ||||
if for_backups: | if for_backups: | ||||
self.vfs = vfsmod.vfs(repo.vfs.join(backupdir)) | self.vfs = vfsmod.vfs(repo.vfs.join(backupdir)) | ||||
else: | else: | ||||
self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir)) | self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir)) | ||||
def get(self, name): | def get(self, name): | ||||
return Shelf(self.vfs, name) | return Shelf(self.vfs, name) | ||||
shelf = self.get(name) | shelf = self.get(name) | ||||
if not shelf.exists(): | if not shelf.exists(): | ||||
continue | continue | ||||
mtime = shelf.mtime() | mtime = shelf.mtime() | ||||
info.append((mtime, name)) | info.append((mtime, name)) | ||||
return sorted(info, reverse=True) | return sorted(info, reverse=True) | ||||
class Shelf(object): | class Shelf: | ||||
"""Represents a shelf, including possibly multiple files storing it. | """Represents a shelf, including possibly multiple files storing it. | ||||
Old shelves will have a .patch and a .hg file. Newer shelves will | Old shelves will have a .patch and a .hg file. Newer shelves will | ||||
also have a .shelve file. This class abstracts away some of the | also have a .shelve file. This class abstracts away some of the | ||||
differences and lets you work with the shelf as a whole. | differences and lets you work with the shelf as a whole. | ||||
""" | """ | ||||
def __init__(self, vfs, name): | def __init__(self, vfs, name): | ||||
self._backupfilename(backupvfs, filename), | self._backupfilename(backupvfs, filename), | ||||
) | ) | ||||
    def delete(self):
        """Remove every on-disk file belonging to this shelf.

        Missing files are ignored (``tryunlink``).
        """
        for ext in shelvefileextensions:
            self.vfs.tryunlink(self.name + b'.' + ext)
class shelvedstate(object): | class shelvedstate: | ||||
"""Handle persistence during unshelving operations. | """Handle persistence during unshelving operations. | ||||
Handles saving and restoring a shelved state. Ensures that different | Handles saving and restoring a shelved state. Ensures that different | ||||
versions of a shelved state are possible and handles them appropriately. | versions of a shelved state are possible and handles them appropriately. | ||||
""" | """ | ||||
_version = 2 | _version = 2 | ||||
_filename = b'shelvedstate' | _filename = b'shelvedstate' |
pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend) | pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend) | ||||
): | ): | ||||
if a[ia] != b[ib]: | if a[ia] != b[ib]: | ||||
return False | return False | ||||
else: | else: | ||||
return True | return True | ||||
class Merge3Text(object): | class Merge3Text: | ||||
"""3-way merge of texts. | """3-way merge of texts. | ||||
Given strings BASE, OTHER, THIS, tries to produce a combined text | Given strings BASE, OTHER, THIS, tries to produce a combined text | ||||
incorporating the changes from both BASE->OTHER and BASE->THIS.""" | incorporating the changes from both BASE->OTHER and BASE->THIS.""" | ||||
def __init__(self, basetext, atext, btext, base=None, a=None, b=None): | def __init__(self, basetext, atext, btext, base=None, a=None, b=None): | ||||
self.basetext = basetext | self.basetext = basetext | ||||
self.atext = atext | self.atext = atext | ||||
if what == b'conflict': | if what == b'conflict': | ||||
for side in sides: | for side in sides: | ||||
lines.extend(group_lines[side]) | lines.extend(group_lines[side]) | ||||
else: | else: | ||||
lines.extend(group_lines) | lines.extend(group_lines) | ||||
return lines | return lines | ||||
class MergeInput(object): | class MergeInput: | ||||
def __init__(self, fctx, label=None, label_detail=None): | def __init__(self, fctx, label=None, label_detail=None): | ||||
self.fctx = fctx | self.fctx = fctx | ||||
self.label = label | self.label = label | ||||
# If the "detail" part is set, then that is rendered after the label and | # If the "detail" part is set, then that is rendered after the label and | ||||
# separated by a ':'. The label is padded to make the ':' aligned among | # separated by a ':'. The label is padded to make the ':' aligned among | ||||
# all merge inputs. | # all merge inputs. | ||||
self.label_detail = label_detail | self.label_detail = label_detail | ||||
self._text = None | self._text = None |
) | ) | ||||
from .utils import stringutil | from .utils import stringutil | ||||
def _typename(o):
    """Return ``o``'s class name as bytes, minus any leading underscores."""
    name = type(o).__name__
    return pycompat.sysbytes(name).lstrip(b'_')
class abstractsmartset(object): | class abstractsmartset: | ||||
def __nonzero__(self): | def __nonzero__(self): | ||||
"""True if the smartset is not empty""" | """True if the smartset is not empty""" | ||||
raise NotImplementedError() | raise NotImplementedError() | ||||
__bool__ = __nonzero__ | __bool__ = __nonzero__ | ||||
def __contains__(self, rev): | def __contains__(self, rev): | ||||
"""provide fast membership testing""" | """provide fast membership testing""" |
if pipe and not pipe.closed: | if pipe and not pipe.closed: | ||||
s = procutil.readpipe(pipe) | s = procutil.readpipe(pipe) | ||||
if s: | if s: | ||||
display = ui.warn if warn else ui.status | display = ui.warn if warn else ui.status | ||||
for l in s.splitlines(): | for l in s.splitlines(): | ||||
display(_(b"remote: "), l, b'\n') | display(_(b"remote: "), l, b'\n') | ||||
class doublepipe(object): | class doublepipe: | ||||
"""Operate a side-channel pipe in addition of a main one | """Operate a side-channel pipe in addition of a main one | ||||
The side-channel pipe contains server output to be forwarded to the user | The side-channel pipe contains server output to be forwarded to the user | ||||
input. The double pipe will behave as the "main" pipe, but will ensure the | input. The double pipe will behave as the "main" pipe, but will ensure the | ||||
content of the "side" pipe is properly processed while we wait for blocking | content of the "side" pipe is properly processed while we wait for blocking | ||||
call on the "main" pipe. | call on the "main" pipe. | ||||
If large amounts of data are read from "main", the forward will cease after | If large amounts of data are read from "main", the forward will cease after |
Any, | Any, | ||||
Dict, | Dict, | ||||
) | ) | ||||
for t in (Any, Dict): | for t in (Any, Dict): | ||||
assert t | assert t | ||||
class cmdstate(object): | class cmdstate: | ||||
"""a wrapper class to store the state of commands like `rebase`, `graft`, | """a wrapper class to store the state of commands like `rebase`, `graft`, | ||||
`histedit`, `shelve` etc. Extensions can also use this to write state files. | `histedit`, `shelve` etc. Extensions can also use this to write state files. | ||||
All the data for the state is stored in the form of key-value pairs in a | All the data for the state is stored in the form of key-value pairs in a | ||||
dictionary. | dictionary. | ||||
The class object can write all the data to a file in .hg/ directory and | The class object can write all the data to a file in .hg/ directory and | ||||
can populate the object data reading that file. | can populate the object data reading that file. | ||||
"""drop the state file if exists""" | """drop the state file if exists""" | ||||
util.unlinkpath(self._repo.vfs.join(self.fname), ignoremissing=True) | util.unlinkpath(self._repo.vfs.join(self.fname), ignoremissing=True) | ||||
def exists(self): | def exists(self): | ||||
"""check whether the state file exists or not""" | """check whether the state file exists or not""" | ||||
return self._repo.vfs.exists(self.fname) | return self._repo.vfs.exists(self.fname) | ||||
class _statecheck(object): | class _statecheck: | ||||
"""a utility class that deals with multistep operations like graft, | """a utility class that deals with multistep operations like graft, | ||||
histedit, bisect, update etc and check whether such commands | histedit, bisect, update etc and check whether such commands | ||||
are in an unfinished conditition or not and return appropriate message | are in an unfinished conditition or not and return appropriate message | ||||
and hint. | and hint. | ||||
It also has the ability to register and determine the states of any new | It also has the ability to register and determine the states of any new | ||||
multistep operation or multistep command extension. | multistep operation or multistep command extension. | ||||
""" | """ | ||||
from .utils import ( | from .utils import ( | ||||
urlutil, | urlutil, | ||||
) | ) | ||||
urlerr = util.urlerr | urlerr = util.urlerr | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
class httprangereader(object): | class httprangereader: | ||||
def __init__(self, url, opener): | def __init__(self, url, opener): | ||||
# we assume opener has HTTPRangeHandler | # we assume opener has HTTPRangeHandler | ||||
self.url = url | self.url = url | ||||
self.pos = 0 | self.pos = 0 | ||||
self.opener = opener | self.opener = opener | ||||
self.name = url | self.name = url | ||||
def __enter__(self): | def __enter__(self): |
times = os.times() | times = os.times() | ||||
return (times[0] + times[1], times[4]) | return (times[0] + times[1], times[4]) | ||||
########################################################################### | ########################################################################### | ||||
## Collection data structures | ## Collection data structures | ||||
class ProfileState(object): | class ProfileState: | ||||
def __init__(self, frequency=None): | def __init__(self, frequency=None): | ||||
self.reset(frequency) | self.reset(frequency) | ||||
self.track = b'cpu' | self.track = b'cpu' | ||||
def reset(self, frequency=None): | def reset(self, frequency=None): | ||||
# total so far | # total so far | ||||
self.accumulated_time = (0.0, 0.0) | self.accumulated_time = (0.0, 0.0) | ||||
# start_time when timer is active | # start_time when timer is active | ||||
if self.track == b'real': | if self.track == b'real': | ||||
return 1 | return 1 | ||||
return 0 | return 0 | ||||
state = ProfileState() | state = ProfileState() | ||||
class CodeSite(object): | class CodeSite: | ||||
cache = {} | cache = {} | ||||
__slots__ = ('path', 'lineno', 'function', 'source') | __slots__ = ('path', 'lineno', 'function', 'source') | ||||
def __init__(self, path, lineno, function): | def __init__(self, path, lineno, function): | ||||
assert isinstance(path, bytes) | assert isinstance(path, bytes) | ||||
self.path = path | self.path = path | ||||
self.lineno = lineno | self.lineno = lineno | ||||
def filename(self): | def filename(self): | ||||
return os.path.basename(self.path) | return os.path.basename(self.path) | ||||
def skipname(self): | def skipname(self): | ||||
return '%s:%s' % (self.filename(), self.function) | return '%s:%s' % (self.filename(), self.function) | ||||
class Sample(object): | class Sample: | ||||
__slots__ = ('stack', 'time') | __slots__ = ('stack', 'time') | ||||
def __init__(self, stack, time): | def __init__(self, stack, time): | ||||
self.stack = stack | self.stack = stack | ||||
self.time = time | self.time = time | ||||
@classmethod | @classmethod | ||||
def from_frame(cls, frame, time): | def from_frame(cls, frame, time): | ||||
stop() | stop() | ||||
display() | display() | ||||
########################################################################### | ########################################################################### | ||||
## Reporting API | ## Reporting API | ||||
class SiteStats(object): | class SiteStats: | ||||
def __init__(self, site): | def __init__(self, site): | ||||
self.site = site | self.site = site | ||||
self.selfcount = 0 | self.selfcount = 0 | ||||
self.totalcount = 0 | self.totalcount = 0 | ||||
def addself(self): | def addself(self): | ||||
self.selfcount += 1 | self.selfcount += 1 | ||||
count / relevant_samples * 100, | count / relevant_samples * 100, | ||||
child.lineno, | child.lineno, | ||||
pycompat.sysbytes(child.getsource(50)), | pycompat.sysbytes(child.getsource(50)), | ||||
) | ) | ||||
) | ) | ||||
def display_hotpath(data, fp, limit=0.05, **kwargs): | def display_hotpath(data, fp, limit=0.05, **kwargs): | ||||
class HotNode(object): | class HotNode: | ||||
def __init__(self, site): | def __init__(self, site): | ||||
self.site = site | self.site = site | ||||
self.count = 0 | self.count = 0 | ||||
self.children = {} | self.children = {} | ||||
def add(self, stack, time): | def add(self, stack, time): | ||||
self.count += time | self.count += time | ||||
site = stack[0] | site = stack[0] |
FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER | FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER | ||||
FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN | FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN | ||||
FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER | FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER | ||||
FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN | FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN | ||||
FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER | FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER | ||||
FILETYPE_OTHER = FILEFLAGS_OTHER | FILETYPE_OTHER = FILEFLAGS_OTHER | ||||
class basicstore(object): | class basicstore: | ||||
'''base class for local repository stores''' | '''base class for local repository stores''' | ||||
def __init__(self, path, vfstype): | def __init__(self, path, vfstype): | ||||
vfs = vfstype(path) | vfs = vfstype(path) | ||||
self.path = vfs.base | self.path = vfs.base | ||||
self.createmode = _calcmode(vfs) | self.createmode = _calcmode(vfs) | ||||
vfs.createmode = self.createmode | vfs.createmode = self.createmode | ||||
self.rawvfs = vfs | self.rawvfs = vfs | ||||
def join(self, f): | def join(self, f): | ||||
return self.path + b'/' + encodefilename(f) | return self.path + b'/' + encodefilename(f) | ||||
def copylist(self): | def copylist(self): | ||||
return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] | return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data] | ||||
class fncache(object): | class fncache: | ||||
# the filename used to be partially encoded | # the filename used to be partially encoded | ||||
# hence the encodedir/decodedir dance | # hence the encodedir/decodedir dance | ||||
def __init__(self, vfs): | def __init__(self, vfs): | ||||
self.vfs = vfs | self.vfs = vfs | ||||
self.entries = None | self.entries = None | ||||
self._dirty = False | self._dirty = False | ||||
# set of new additions to fncache | # set of new additions to fncache | ||||
self.addls = set() | self.addls = set() |
_(b'unable to apply stream clone: unsupported format: %s') | _(b'unable to apply stream clone: unsupported format: %s') | ||||
% b', '.join(sorted(missingreqs)) | % b', '.join(sorted(missingreqs)) | ||||
) | ) | ||||
consumev1(repo, fp, filecount, bytecount) | consumev1(repo, fp, filecount, bytecount) | ||||
nodemap.post_stream_cleanup(repo) | nodemap.post_stream_cleanup(repo) | ||||
class streamcloneapplier(object): | class streamcloneapplier: | ||||
"""Class to manage applying streaming clone bundles. | """Class to manage applying streaming clone bundles. | ||||
We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle | We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle | ||||
readers to perform bundle type-specific functionality. | readers to perform bundle type-specific functionality. | ||||
""" | """ | ||||
def __init__(self, fh): | def __init__(self, fh): | ||||
self._fh = fh | self._fh = fh |
if state[2] == b'hg': | if state[2] == b'hg': | ||||
subrev = b"0" * 40 | subrev = b"0" * 40 | ||||
return types[state[2]](pctx, path, (state[0], subrev), True) | return types[state[2]](pctx, path, (state[0], subrev), True) | ||||
# subrepo classes need to implement the following abstract class: | # subrepo classes need to implement the following abstract class: | ||||
class abstractsubrepo(object): | class abstractsubrepo: | ||||
def __init__(self, ctx, path): | def __init__(self, ctx, path): | ||||
"""Initialize abstractsubrepo part | """Initialize abstractsubrepo part | ||||
``ctx`` is the context referring this subrepository in the | ``ctx`` is the context referring this subrepository in the | ||||
parent repository. | parent repository. | ||||
``path`` is the path to this subrepository as seen from | ``path`` is the path to this subrepository as seen from | ||||
innermost repository. | innermost repository. |
return tagnode | return tagnode | ||||
_fnodescachefile = b'hgtagsfnodes1' | _fnodescachefile = b'hgtagsfnodes1' | ||||
_fnodesrecsize = 4 + 20 # changeset fragment + filenode | _fnodesrecsize = 4 + 20 # changeset fragment + filenode | ||||
_fnodesmissingrec = b'\xff' * 24 | _fnodesmissingrec = b'\xff' * 24 | ||||
class hgtagsfnodescache(object): | class hgtagsfnodescache: | ||||
"""Persistent cache mapping revisions to .hgtags filenodes. | """Persistent cache mapping revisions to .hgtags filenodes. | ||||
The cache is an array of records. Each item in the array corresponds to | The cache is an array of records. Each item in the array corresponds to | ||||
a changelog revision. Values in the array contain the first 4 bytes of | a changelog revision. Values in the array contain the first 4 bytes of | ||||
the node hash and the 20 bytes .hgtags filenode for that revision. | the node hash and the 20 bytes .hgtags filenode for that revision. | ||||
The first 4 bytes are present as a form of verification. Repository | The first 4 bytes are present as a form of verification. Repository | ||||
stripping and rewriting may change the node at a numeric revision in the | stripping and rewriting may change the node at a numeric revision in the |
def unquotestring(s): | def unquotestring(s): | ||||
'''unwrap quotes if any; otherwise returns unmodified string''' | '''unwrap quotes if any; otherwise returns unmodified string''' | ||||
if len(s) < 2 or s[0] not in b"'\"" or s[0] != s[-1]: | if len(s) < 2 or s[0] not in b"'\"" or s[0] != s[-1]: | ||||
return s | return s | ||||
return s[1:-1] | return s[1:-1] | ||||
class resourcemapper(object): # pytype: disable=ignored-metaclass | class resourcemapper: # pytype: disable=ignored-metaclass | ||||
"""Mapper of internal template resources""" | """Mapper of internal template resources""" | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def availablekeys(self, mapping): | def availablekeys(self, mapping): | ||||
"""Return a set of available resource keys based on the given mapping""" | """Return a set of available resource keys based on the given mapping""" | ||||
def lookup(self, mapping, key): | def lookup(self, mapping, key): | ||||
return None | return None | ||||
def populatemap(self, context, origmapping, newmapping): | def populatemap(self, context, origmapping, newmapping): | ||||
return {} | return {} | ||||
class engine(object): | class engine: | ||||
"""template expansion engine. | """template expansion engine. | ||||
template expansion works like this. a map file contains key=value | template expansion works like this. a map file contains key=value | ||||
pairs. if value is quoted, it is treated as string. otherwise, it | pairs. if value is quoted, it is treated as string. otherwise, it | ||||
is treated as name of template file. | is treated as name of template file. | ||||
templater is asked to expand a key in map. it looks up key, and | templater is asked to expand a key in map. it looks up key, and | ||||
looks for strings like this: {foo}. it expands {foo} by looking up | looks for strings like this: {foo}. it expands {foo} by looking up | ||||
) | ) | ||||
cache[key] = unquotestring(val) | cache[key] = unquotestring(val) | ||||
elif key != b'__base__': | elif key != b'__base__': | ||||
tmap[key] = os.path.join(base, val) | tmap[key] = os.path.join(base, val) | ||||
aliases.extend(conf.items(b'templatealias')) | aliases.extend(conf.items(b'templatealias')) | ||||
return cache, tmap, aliases | return cache, tmap, aliases | ||||
class loader(object): | class loader: | ||||
"""Load template fragments optionally from a map file""" | """Load template fragments optionally from a map file""" | ||||
def __init__(self, cache, aliases): | def __init__(self, cache, aliases): | ||||
if cache is None: | if cache is None: | ||||
cache = {} | cache = {} | ||||
self.cache = cache.copy() | self.cache = cache.copy() | ||||
self._map = {} | self._map = {} | ||||
self._aliasmap = _aliasrules.buildmap(aliases) | self._aliasmap = _aliasrules.buildmap(aliases) | ||||
This may load additional templates from the map file. | This may load additional templates from the map file. | ||||
""" | """ | ||||
syms = (set(), set()) | syms = (set(), set()) | ||||
self._findsymbolsused(self.load(t), syms) | self._findsymbolsused(self.load(t), syms) | ||||
return syms | return syms | ||||
class templater(object): | class templater: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
filters=None, | filters=None, | ||||
defaults=None, | defaults=None, | ||||
resources=None, | resources=None, | ||||
cache=None, | cache=None, | ||||
aliases=(), | aliases=(), | ||||
minchunk=1024, | minchunk=1024, |
class ResourceUnavailable(error.Abort): | class ResourceUnavailable(error.Abort): | ||||
pass | pass | ||||
class TemplateNotFound(error.Abort): | class TemplateNotFound(error.Abort): | ||||
pass | pass | ||||
class wrapped(object): # pytype: disable=ignored-metaclass | class wrapped: # pytype: disable=ignored-metaclass | ||||
"""Object requiring extra conversion prior to displaying or processing | """Object requiring extra conversion prior to displaying or processing | ||||
as value | as value | ||||
Use unwrapvalue() or unwrapastype() to obtain the inner object. | Use unwrapvalue() or unwrapastype() to obtain the inner object. | ||||
""" | """ | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def tovalue(self, context, mapping): | def tovalue(self, context, mapping): | ||||
"""Move the inner value object out or create a value representation | """Move the inner value object out or create a value representation | ||||
A returned value must be serializable by templaterfilters.json(). | A returned value must be serializable by templaterfilters.json(). | ||||
""" | """ | ||||
class mappable(object): # pytype: disable=ignored-metaclass | class mappable: # pytype: disable=ignored-metaclass | ||||
"""Object which can be converted to a single template mapping""" | """Object which can be converted to a single template mapping""" | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
def itermaps(self, context): | def itermaps(self, context): | ||||
yield self.tomap(context) | yield self.tomap(context) | ||||
@abc.abstractmethod | @abc.abstractmethod |
def _maybestrurl(maybebytes): | def _maybestrurl(maybebytes): | ||||
return pycompat.rapply(pycompat.strurl, maybebytes) | return pycompat.rapply(pycompat.strurl, maybebytes) | ||||
def _maybebytesurl(maybestr): | def _maybebytesurl(maybestr): | ||||
return pycompat.rapply(pycompat.bytesurl, maybestr) | return pycompat.rapply(pycompat.bytesurl, maybestr) | ||||
class httppasswordmgrdbproxy(object): | class httppasswordmgrdbproxy: | ||||
"""Delays loading urllib2 until it's needed.""" | """Delays loading urllib2 until it's needed.""" | ||||
def __init__(self): | def __init__(self): | ||||
self._mgr = None | self._mgr = None | ||||
def _get_mgr(self): | def _get_mgr(self): | ||||
if self._mgr is None: | if self._mgr is None: | ||||
self._mgr = urlreq.httppasswordmgrwithdefaultrealm() | self._mgr = urlreq.httppasswordmgrwithdefaultrealm() | ||||
# unique object used to detect no default value has been provided when | # unique object used to detect no default value has been provided when | ||||
# retrieving configuration value. | # retrieving configuration value. | ||||
_unset = object() | _unset = object() | ||||
# _reqexithandlers: callbacks run at the end of a request | # _reqexithandlers: callbacks run at the end of a request | ||||
_reqexithandlers = [] | _reqexithandlers = [] | ||||
class ui(object): | class ui: | ||||
def __init__(self, src=None): | def __init__(self, src=None): | ||||
"""Create a fresh new ui object if no src given | """Create a fresh new ui object if no src given | ||||
Use uimod.ui.load() to create a ui which knows global and user configs. | Use uimod.ui.load() to create a ui which knows global and user configs. | ||||
In most cases, you should use ui.copy() to create a copy of an existing | In most cases, you should use ui.copy() to create a copy of an existing | ||||
ui object. | ui object. | ||||
""" | """ | ||||
# _buffers: used for temporary capture of output | # _buffers: used for temporary capture of output |
return self.revlog2.iscensored(self.revlog2.rev(node)) | return self.revlog2.iscensored(self.revlog2.rev(node)) | ||||
class unionpeer(localrepo.localpeer): | class unionpeer(localrepo.localpeer): | ||||
def canpush(self): | def canpush(self): | ||||
return False | return False | ||||
class unionrepository(object): | class unionrepository: | ||||
"""Represents the union of data in 2 repositories. | """Represents the union of data in 2 repositories. | ||||
Instances are not usable if constructed directly. Use ``instance()`` | Instances are not usable if constructed directly. Use ``instance()`` | ||||
or ``makeunionrepository()`` to create a usable instance. | or ``makeunionrepository()`` to create a usable instance. | ||||
""" | """ | ||||
def __init__(self, repo2, url): | def __init__(self, repo2, url): | ||||
self.repo2 = repo2 | self.repo2 = repo2 |
} | } | ||||
return preserved & repo.requirements | return preserved & repo.requirements | ||||
FORMAT_VARIANT = b'deficiency' | FORMAT_VARIANT = b'deficiency' | ||||
OPTIMISATION = b'optimization' | OPTIMISATION = b'optimization' | ||||
class improvement(object): | class improvement: | ||||
"""Represents an improvement that can be made as part of an upgrade.""" | """Represents an improvement that can be made as part of an upgrade.""" | ||||
### The following attributes should be defined for each subclass: | ### The following attributes should be defined for each subclass: | ||||
# Either ``FORMAT_VARIANT`` or ``OPTIMISATION``. | # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``. | ||||
# A format variant is where we change the storage format. Not all format | # A format variant is where we change the storage format. Not all format | ||||
# variant changes are an obvious problem. | # variant changes are an obvious problem. | ||||
# An optimization is an action (sometimes optional) that | # An optimization is an action (sometimes optional) that | ||||
newactions.extend(o for o in sorted(optimizations) if o not in newactions) | newactions.extend(o for o in sorted(optimizations) if o not in newactions) | ||||
# FUTURE consider adding some optimizations here for certain transitions. | # FUTURE consider adding some optimizations here for certain transitions. | ||||
# e.g. adding generaldelta could schedule parent redeltas. | # e.g. adding generaldelta could schedule parent redeltas. | ||||
return newactions | return newactions | ||||
class UpgradeOperation(object): | class UpgradeOperation: | ||||
"""represent the work to be done during an upgrade""" | """represent the work to be done during an upgrade""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
ui, | ui, | ||||
new_requirements, | new_requirements, | ||||
current_requirements, | current_requirements, | ||||
upgrade_actions, | upgrade_actions, |
s = s.replace(b"&", b"&") | s = s.replace(b"&", b"&") | ||||
s = s.replace(b"<", b"<") | s = s.replace(b"<", b"<") | ||||
s = s.replace(b">", b">") | s = s.replace(b">", b">") | ||||
if quote: | if quote: | ||||
s = s.replace(b'"', b""") | s = s.replace(b'"', b""") | ||||
return s | return s | ||||
class passwordmgr(object): | class passwordmgr: | ||||
def __init__(self, ui, passwddb): | def __init__(self, ui, passwddb): | ||||
self.ui = ui | self.ui = ui | ||||
self.passwddb = passwddb | self.passwddb = passwddb | ||||
def add_password(self, realm, uri, user, passwd): | def add_password(self, realm, uri, user, passwd): | ||||
return self.passwddb.add_password(realm, uri, user, passwd) | return self.passwddb.add_password(realm, uri, user, passwd) | ||||
def find_user_password(self, realm, authuri): | def find_user_password(self, realm, authuri): |
import urllib.response | import urllib.response | ||||
from .pycompat import getattr | from .pycompat import getattr | ||||
from . import pycompat | from . import pycompat | ||||
_sysstr = pycompat.sysstr | _sysstr = pycompat.sysstr | ||||
class _pycompatstub(object): | class _pycompatstub: | ||||
def __init__(self): | def __init__(self): | ||||
self._aliases = {} | self._aliases = {} | ||||
def _registeraliases(self, origin, items): | def _registeraliases(self, origin, items): | ||||
"""Add items that will be populated at the first access""" | """Add items that will be populated at the first access""" | ||||
items = map(_sysstr, items) | items = map(_sysstr, items) | ||||
self._aliases.update( | self._aliases.update( | ||||
(item.replace('_', '').lower(), (origin, item)) for item in items | (item.replace('_', '').lower(), (origin, item)) for item in items |
} | } | ||||
# List of digest types from strongest to weakest | # List of digest types from strongest to weakest | ||||
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5'] | DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5'] | ||||
for k in DIGESTS_BY_STRENGTH: | for k in DIGESTS_BY_STRENGTH: | ||||
assert k in DIGESTS | assert k in DIGESTS | ||||
class digester(object): | class digester: | ||||
"""helper to compute digests. | """helper to compute digests. | ||||
This helper can be used to compute one or more digests given their name. | This helper can be used to compute one or more digests given their name. | ||||
>>> d = digester([b'md5', b'sha1']) | >>> d = digester([b'md5', b'sha1']) | ||||
>>> d.update(b'foo') | >>> d.update(b'foo') | ||||
>>> [k for k in sorted(d)] | >>> [k for k in sorted(d)] | ||||
['md5', 'sha1'] | ['md5', 'sha1'] | ||||
"""returns the strongest digest type in both supported and DIGESTS.""" | """returns the strongest digest type in both supported and DIGESTS.""" | ||||
for k in DIGESTS_BY_STRENGTH: | for k in DIGESTS_BY_STRENGTH: | ||||
if k in supported: | if k in supported: | ||||
return k | return k | ||||
return None | return None | ||||
class digestchecker(object): | class digestchecker: | ||||
"""file handle wrapper that additionally checks content against a given | """file handle wrapper that additionally checks content against a given | ||||
size and digests. | size and digests. | ||||
d = digestchecker(fh, size, {'md5': '...'}) | d = digestchecker(fh, size, {'md5': '...'}) | ||||
When multiple digests are given, all of them are validated. | When multiple digests are given, all of them are validated. | ||||
""" | """ | ||||
if length is not None: | if length is not None: | ||||
return memoryview(sliceable)[offset : offset + length] | return memoryview(sliceable)[offset : offset + length] | ||||
return memoryview(sliceable)[offset:] | return memoryview(sliceable)[offset:] | ||||
_chunksize = 4096 | _chunksize = 4096 | ||||
class bufferedinputpipe(object): | class bufferedinputpipe: | ||||
"""a manually buffered input pipe | """a manually buffered input pipe | ||||
Python will not let us use buffered IO and lazy reading with 'polling' at | Python will not let us use buffered IO and lazy reading with 'polling' at | ||||
the same time. We cannot probe the buffer state and select will not detect | the same time. We cannot probe the buffer state and select will not detect | ||||
that data are ready to read if they are already buffered. | that data are ready to read if they are already buffered. | ||||
This class let us work around that by implementing its own buffering | This class let us work around that by implementing its own buffering | ||||
(allowing efficient readline) while offering a way to know if the buffer is | (allowing efficient readline) while offering a way to know if the buffer is | ||||
except ValueError: | except ValueError: | ||||
# Empty files cannot be mmapped, but mmapread should still work. Check | # Empty files cannot be mmapped, but mmapread should still work. Check | ||||
# if the file is empty, and if so, return an empty buffer. | # if the file is empty, and if so, return an empty buffer. | ||||
if os.fstat(fd).st_size == 0: | if os.fstat(fd).st_size == 0: | ||||
return b'' | return b'' | ||||
raise | raise | ||||
class fileobjectproxy(object): | class fileobjectproxy: | ||||
"""A proxy around file objects that tells a watcher when events occur. | """A proxy around file objects that tells a watcher when events occur. | ||||
This type is intended to only be used for testing purposes. Think hard | This type is intended to only be used for testing purposes. Think hard | ||||
before using it in important code. | before using it in important code. | ||||
""" | """ | ||||
__slots__ = ( | __slots__ = ( | ||||
'_orig', | '_orig', | ||||
'sendto', | 'sendto', | ||||
'setblocking', | 'setblocking', | ||||
'settimeout', | 'settimeout', | ||||
'gettimeout', | 'gettimeout', | ||||
'setsockopt', | 'setsockopt', | ||||
} | } | ||||
class socketproxy(object): | class socketproxy: | ||||
"""A proxy around a socket that tells a watcher when events occur. | """A proxy around a socket that tells a watcher when events occur. | ||||
This is like ``fileobjectproxy`` except for sockets. | This is like ``fileobjectproxy`` except for sockets. | ||||
This type is intended to only be used for testing purposes. Think hard | This type is intended to only be used for testing purposes. Think hard | ||||
before using it in important code. | before using it in important code. | ||||
""" | """ | ||||
) | ) | ||||
def setsockopt(self, *args, **kwargs): | def setsockopt(self, *args, **kwargs): | ||||
return object.__getattribute__(self, '_observedcall')( | return object.__getattribute__(self, '_observedcall')( | ||||
'setsockopt', *args, **kwargs | 'setsockopt', *args, **kwargs | ||||
) | ) | ||||
class baseproxyobserver(object): | class baseproxyobserver: | ||||
def __init__(self, fh, name, logdata, logdataapis): | def __init__(self, fh, name, logdata, logdataapis): | ||||
self.fh = fh | self.fh = fh | ||||
self.name = name | self.name = name | ||||
self.logdata = logdata | self.logdata = logdata | ||||
self.logdataapis = logdataapis | self.logdataapis = logdataapis | ||||
def _writedata(self, data): | def _writedata(self, data): | ||||
if not self.logdata: | if not self.logdata: | ||||
def f(*args): | def f(*args): | ||||
if args not in cache: | if args not in cache: | ||||
cache[args] = func(*args) | cache[args] = func(*args) | ||||
return cache[args] | return cache[args] | ||||
return f | return f | ||||
class cow(object): | class cow: | ||||
"""helper class to make copy-on-write easier | """helper class to make copy-on-write easier | ||||
Call preparewrite before doing any writes. | Call preparewrite before doing any writes. | ||||
""" | """ | ||||
def preparewrite(self): | def preparewrite(self): | ||||
"""call this before writes, return self or a copied new object""" | """call this before writes, return self or a copied new object""" | ||||
if getattr(self, '_copied', 0): | if getattr(self, '_copied', 0): | ||||
class cowsortdict(cow, sortdict): | class cowsortdict(cow, sortdict): | ||||
"""copy-on-write sortdict | """copy-on-write sortdict | ||||
Be sure to call d = d.preparewrite() before writing to d. | Be sure to call d = d.preparewrite() before writing to d. | ||||
""" | """ | ||||
class transactional(object): # pytype: disable=ignored-metaclass | class transactional: # pytype: disable=ignored-metaclass | ||||
"""Base class for making a transactional type into a context manager.""" | """Base class for making a transactional type into a context manager.""" | ||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def close(self): | def close(self): | ||||
"""Successfully closes the transaction.""" | """Successfully closes the transaction.""" | ||||
tr.release() | tr.release() | ||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def nullcontextmanager(enter_result=None): | def nullcontextmanager(enter_result=None): | ||||
yield enter_result | yield enter_result | ||||
class _lrucachenode(object): | class _lrucachenode: | ||||
"""A node in a doubly linked list. | """A node in a doubly linked list. | ||||
Holds a reference to nodes on either side as well as a key-value | Holds a reference to nodes on either side as well as a key-value | ||||
pair for the dictionary entry. | pair for the dictionary entry. | ||||
""" | """ | ||||
__slots__ = ('next', 'prev', 'key', 'value', 'cost') | __slots__ = ('next', 'prev', 'key', 'value', 'cost') | ||||
def __init__(self): | def __init__(self): | ||||
self.next = self | self.next = self | ||||
self.prev = self | self.prev = self | ||||
self.key = _notset | self.key = _notset | ||||
self.value = None | self.value = None | ||||
self.cost = 0 | self.cost = 0 | ||||
def markempty(self): | def markempty(self): | ||||
"""Mark the node as emptied.""" | """Mark the node as emptied.""" | ||||
self.key = _notset | self.key = _notset | ||||
self.value = None | self.value = None | ||||
self.cost = 0 | self.cost = 0 | ||||
class lrucachedict(object): | class lrucachedict: | ||||
"""Dict that caches most recent accesses and sets. | """Dict that caches most recent accesses and sets. | ||||
The dict consists of an actual backing dict - indexed by original | The dict consists of an actual backing dict - indexed by original | ||||
key - and a doubly linked circular list defining the order of entries in | key - and a doubly linked circular list defining the order of entries in | ||||
the cache. | the cache. | ||||
The head node is the newest entry in the cache. If the cache is full, | The head node is the newest entry in the cache. If the cache is full, | ||||
we recycle head.prev and make it the new head. Cache accesses result in | we recycle head.prev and make it the new head. Cache accesses result in | ||||
else: | else: | ||||
order.remove(args) | order.remove(args) | ||||
order.append(args) | order.append(args) | ||||
return cache[args] | return cache[args] | ||||
return f | return f | ||||
class propertycache(object): | class propertycache: | ||||
def __init__(self, func): | def __init__(self, func): | ||||
self.func = func | self.func = func | ||||
self.name = func.__name__ | self.name = func.__name__ | ||||
def __get__(self, obj, type=None): | def __get__(self, obj, type=None): | ||||
result = self.func(obj) | result = self.func(obj) | ||||
self.cachevalue(obj, result) | self.cachevalue(obj, result) | ||||
return result | return result | ||||
try: | try: | ||||
import re2 # pytype: disable=import-error | import re2 # pytype: disable=import-error | ||||
_re2 = None | _re2 = None | ||||
except ImportError: | except ImportError: | ||||
_re2 = False | _re2 = False | ||||
class _re(object): | class _re: | ||||
def _checkre2(self): | def _checkre2(self): | ||||
global _re2 | global _re2 | ||||
global _re2_input | global _re2_input | ||||
check_pattern = br'\[([^\[]+)\]' | check_pattern = br'\[([^\[]+)\]' | ||||
check_input = b'[ui]' | check_input = b'[ui]' | ||||
try: | try: | ||||
# check if match works, see issue3964 | # check if match works, see issue3964 | ||||
try: | try: | ||||
os.unlink(temp) | os.unlink(temp) | ||||
except OSError: | except OSError: | ||||
pass | pass | ||||
raise | raise | ||||
return temp | return temp | ||||
class filestat(object): | class filestat: | ||||
"""help to exactly detect change of a file | """help to exactly detect change of a file | ||||
'stat' attribute is result of 'os.stat()' if specified 'path' | 'stat' attribute is result of 'os.stat()' if specified 'path' | ||||
exists. Otherwise, it is None. This can avoid preparative | exists. Otherwise, it is None. This can avoid preparative | ||||
'exists()' examination on client side of this class. | 'exists()' examination on client side of this class. | ||||
""" | """ | ||||
def __init__(self, stat): | def __init__(self, stat): | ||||
return False | return False | ||||
raise | raise | ||||
return True | return True | ||||
def __ne__(self, other): | def __ne__(self, other): | ||||
return not self == other | return not self == other | ||||
class atomictempfile(object): | class atomictempfile: | ||||
"""writable file object that atomically updates a file | """writable file object that atomically updates a file | ||||
All writes will go to a temporary copy of the original file. Call | All writes will go to a temporary copy of the original file. Call | ||||
close() when you are done writing, and atomictempfile will rename | close() when you are done writing, and atomictempfile will rename | ||||
the temporary copy to the original name, making the changes | the temporary copy to the original name, making the changes | ||||
visible. If the object is destroyed without being closed, all your | visible. If the object is destroyed without being closed, all your | ||||
writes are discarded. | writes are discarded. | ||||
def appendfile(path, text): | def appendfile(path, text): | ||||
# type: (bytes, bytes) -> None | # type: (bytes, bytes) -> None | ||||
with open(path, b'ab') as fp: | with open(path, b'ab') as fp: | ||||
fp.write(text) | fp.write(text) | ||||
class chunkbuffer(object): | class chunkbuffer: | ||||
"""Allow arbitrary sized chunks of data to be efficiently read from an | """Allow arbitrary sized chunks of data to be efficiently read from an | ||||
iterator over chunks of arbitrary size.""" | iterator over chunks of arbitrary size.""" | ||||
def __init__(self, in_iter): | def __init__(self, in_iter): | ||||
"""in_iter is the iterator that's iterating over the input chunks.""" | """in_iter is the iterator that's iterating over the input chunks.""" | ||||
def splitbig(chunks): | def splitbig(chunks): | ||||
for chunk in chunks: | for chunk in chunks: | ||||
s = nbytes and f.read(nbytes) | s = nbytes and f.read(nbytes) | ||||
if not s: | if not s: | ||||
break | break | ||||
if limit: | if limit: | ||||
limit -= len(s) | limit -= len(s) | ||||
yield s | yield s | ||||
class cappedreader(object): | class cappedreader: | ||||
"""A file object proxy that allows reading up to N bytes. | """A file object proxy that allows reading up to N bytes. | ||||
Given a source file object, instances of this type allow reading up to | Given a source file object, instances of this type allow reading up to | ||||
N bytes from that source file object. Attempts to read past the allowed | N bytes from that source file object. Attempts to read past the allowed | ||||
limit are treated as EOF. | limit are treated as EOF. | ||||
It is assumed that I/O is not performed on the original file object | It is assumed that I/O is not performed on the original file object | ||||
in addition to I/O that is performed by this instance. If there is, | in addition to I/O that is performed by this instance. If there is, | ||||
(1, 1 << 20, _(b'%.2f MB')), | (1, 1 << 20, _(b'%.2f MB')), | ||||
(100, 1 << 10, _(b'%.0f KB')), | (100, 1 << 10, _(b'%.0f KB')), | ||||
(10, 1 << 10, _(b'%.1f KB')), | (10, 1 << 10, _(b'%.1f KB')), | ||||
(1, 1 << 10, _(b'%.2f KB')), | (1, 1 << 10, _(b'%.2f KB')), | ||||
(1, 1, _(b'%.0f bytes')), | (1, 1, _(b'%.0f bytes')), | ||||
) | ) | ||||
class transformingwriter(object): | class transformingwriter: | ||||
"""Writable file wrapper to transform data by function""" | """Writable file wrapper to transform data by function""" | ||||
def __init__(self, fp, encode): | def __init__(self, fp, encode): | ||||
self._fp = fp | self._fp = fp | ||||
self._encode = encode | self._encode = encode | ||||
def close(self): | def close(self): | ||||
self._fp.close() | self._fp.close() | ||||
(1, 0.000001, _(b'%.3f us')), | (1, 0.000001, _(b'%.3f us')), | ||||
(100, 0.000000001, _(b'%.1f ns')), | (100, 0.000000001, _(b'%.1f ns')), | ||||
(10, 0.000000001, _(b'%.2f ns')), | (10, 0.000000001, _(b'%.2f ns')), | ||||
(1, 0.000000001, _(b'%.3f ns')), | (1, 0.000000001, _(b'%.3f ns')), | ||||
) | ) | ||||
@attr.s | @attr.s | ||||
class timedcmstats(object): | class timedcmstats: | ||||
"""Stats information produced by the timedcm context manager on entering.""" | """Stats information produced by the timedcm context manager on entering.""" | ||||
# the starting value of the timer as a float (meaning and resulution is | # the starting value of the timer as a float (meaning and resulution is | ||||
# platform dependent, see util.timer) | # platform dependent, see util.timer) | ||||
start = attr.ib(default=attr.Factory(lambda: timer())) | start = attr.ib(default=attr.Factory(lambda: timer())) | ||||
# the number of seconds as a floating point value; starts at 0, updated when | # the number of seconds as a floating point value; starts at 0, updated when | ||||
# the context is exited. | # the context is exited. | ||||
elapsed = attr.ib(default=0) | elapsed = attr.ib(default=0) | ||||
for k, u in _sizeunits: | for k, u in _sizeunits: | ||||
if t.endswith(k): | if t.endswith(k): | ||||
return int(float(t[: -len(k)]) * u) | return int(float(t[: -len(k)]) * u) | ||||
return int(t) | return int(t) | ||||
except ValueError: | except ValueError: | ||||
raise error.ParseError(_(b"couldn't parse size: %s") % s) | raise error.ParseError(_(b"couldn't parse size: %s") % s) | ||||
class hooks(object): | class hooks: | ||||
"""A collection of hook functions that can be used to extend a | """A collection of hook functions that can be used to extend a | ||||
function's behavior. Hooks are called in lexicographic order, | function's behavior. Hooks are called in lexicographic order, | ||||
based on the names of their sources.""" | based on the names of their sources.""" | ||||
def __init__(self): | def __init__(self): | ||||
self._hooks = [] | self._hooks = [] | ||||
def add(self, source, hook): | def add(self, source, hook): |
def __new__(cls, v, first=False, last=False): | def __new__(cls, v, first=False, last=False): | ||||
self = bytes.__new__(cls, v) | self = bytes.__new__(cls, v) | ||||
self.isfirst = first | self.isfirst = first | ||||
self.islast = last | self.islast = last | ||||
return self | return self | ||||
class sansiodecoder(object): | class sansiodecoder: | ||||
"""A CBOR decoder that doesn't perform its own I/O. | """A CBOR decoder that doesn't perform its own I/O. | ||||
To use, construct an instance and feed it segments containing | To use, construct an instance and feed it segments containing | ||||
CBOR-encoded bytes via ``decode()``. The return value from ``decode()`` | CBOR-encoded bytes via ``decode()``. The return value from ``decode()`` | ||||
indicates whether a fully-decoded value is available, how many bytes | indicates whether a fully-decoded value is available, how many bytes | ||||
were consumed, and offers a hint as to how many bytes should be fed | were consumed, and offers a hint as to how many bytes should be fed | ||||
in next time to decode the next value. | in next time to decode the next value. | ||||
Once values are retrieved, they won't be available on the next call. | Once values are retrieved, they won't be available on the next call. | ||||
""" | """ | ||||
l = list(self._decodedvalues) | l = list(self._decodedvalues) | ||||
self._decodedvalues = [] | self._decodedvalues = [] | ||||
return l | return l | ||||
class bufferingdecoder(object): | class bufferingdecoder: | ||||
"""A CBOR decoder that buffers undecoded input. | """A CBOR decoder that buffers undecoded input. | ||||
This is a glorified wrapper around ``sansiodecoder`` that adds a buffering | This is a glorified wrapper around ``sansiodecoder`` that adds a buffering | ||||
layer. All input that isn't consumed by ``sansiodecoder`` will be buffered | layer. All input that isn't consumed by ``sansiodecoder`` will be buffered | ||||
and concatenated with any new input that arrives later. | and concatenated with any new input that arrives later. | ||||
TODO consider adding limits as to the maximum amount of data that can | TODO consider adding limits as to the maximum amount of data that can | ||||
be buffered. | be buffered. |
CLIENTROLE = b'client' | CLIENTROLE = b'client' | ||||
compewireprotosupport = collections.namedtuple( | compewireprotosupport = collections.namedtuple( | ||||
'compenginewireprotosupport', | 'compenginewireprotosupport', | ||||
('name', 'serverpriority', 'clientpriority'), | ('name', 'serverpriority', 'clientpriority'), | ||||
) | ) | ||||
class propertycache(object): | class propertycache: | ||||
def __init__(self, func): | def __init__(self, func): | ||||
self.func = func | self.func = func | ||||
self.name = func.__name__ | self.name = func.__name__ | ||||
def __get__(self, obj, type=None): | def __get__(self, obj, type=None): | ||||
result = self.func(obj) | result = self.func(obj) | ||||
self.cachevalue(obj, result) | self.cachevalue(obj, result) | ||||
return result | return result | ||||
def cachevalue(self, obj, value): | def cachevalue(self, obj, value): | ||||
# __dict__ assignment required to bypass __setattr__ (eg: repoview) | # __dict__ assignment required to bypass __setattr__ (eg: repoview) | ||||
obj.__dict__[self.name] = value | obj.__dict__[self.name] = value | ||||
class compressormanager(object): | class compressormanager: | ||||
"""Holds registrations of various compression engines. | """Holds registrations of various compression engines. | ||||
This class essentially abstracts the differences between compression | This class essentially abstracts the differences between compression | ||||
engines to allow new compression formats to be added easily, possibly from | engines to allow new compression formats to be added easily, possibly from | ||||
extensions. | extensions. | ||||
Compressors are registered against the global instance by calling its | Compressors are registered against the global instance by calling its | ||||
``register()`` method. | ``register()`` method. | ||||
Will raise KeyError if the revlog header value isn't registered. | Will raise KeyError if the revlog header value isn't registered. | ||||
""" | """ | ||||
return self._engines[self._revlogheaders[header]] | return self._engines[self._revlogheaders[header]] | ||||
compengines = compressormanager() | compengines = compressormanager() | ||||
class compressionengine(object): | class compressionengine: | ||||
"""Base class for compression engines. | """Base class for compression engines. | ||||
Compression engines must implement the interface defined by this class. | Compression engines must implement the interface defined by this class. | ||||
""" | """ | ||||
def name(self): | def name(self): | ||||
"""Returns the name of the compression engine. | """Returns the name of the compression engine. | ||||
``revlogheader()``. The method should return the raw, uncompressed | ``revlogheader()``. The method should return the raw, uncompressed | ||||
data or raise a ``StorageError``. | data or raise a ``StorageError``. | ||||
The object is reusable but is not thread safe. | The object is reusable but is not thread safe. | ||||
""" | """ | ||||
raise NotImplementedError() | raise NotImplementedError() | ||||
class _CompressedStreamReader(object): | class _CompressedStreamReader: | ||||
def __init__(self, fh): | def __init__(self, fh): | ||||
if safehasattr(fh, 'unbufferedread'): | if safehasattr(fh, 'unbufferedread'): | ||||
self._reader = fh.unbufferedread | self._reader = fh.unbufferedread | ||||
else: | else: | ||||
self._reader = fh.read | self._reader = fh.read | ||||
self._pending = [] | self._pending = [] | ||||
self._pos = 0 | self._pos = 0 | ||||
self._eof = False | self._eof = False | ||||
if data: | if data: | ||||
yield data | yield data | ||||
yield z.flush() | yield z.flush() | ||||
def decompressorreader(self, fh): | def decompressorreader(self, fh): | ||||
return _GzipCompressedStreamReader(fh) | return _GzipCompressedStreamReader(fh) | ||||
class zlibrevlogcompressor(object): | class zlibrevlogcompressor: | ||||
def __init__(self, level=None): | def __init__(self, level=None): | ||||
self._level = level | self._level = level | ||||
def compress(self, data): | def compress(self, data): | ||||
insize = len(data) | insize = len(data) | ||||
# Caller handles empty input case. | # Caller handles empty input case. | ||||
assert insize > 0 | assert insize > 0 | ||||
return b'\0' | return b'\0' | ||||
def compressstream(self, it, opts=None): | def compressstream(self, it, opts=None): | ||||
return it | return it | ||||
def decompressorreader(self, fh): | def decompressorreader(self, fh): | ||||
return fh | return fh | ||||
class nooprevlogcompressor(object): | class nooprevlogcompressor: | ||||
def compress(self, data): | def compress(self, data): | ||||
return None | return None | ||||
def revlogcompressor(self, opts=None): | def revlogcompressor(self, opts=None): | ||||
return self.nooprevlogcompressor() | return self.nooprevlogcompressor() | ||||
compengines.register(_noopengine()) | compengines.register(_noopengine()) | ||||
if data: | if data: | ||||
yield data | yield data | ||||
yield z.flush() | yield z.flush() | ||||
def decompressorreader(self, fh): | def decompressorreader(self, fh): | ||||
return _ZstdCompressedStreamReader(fh, self._module) | return _ZstdCompressedStreamReader(fh, self._module) | ||||
class zstdrevlogcompressor(object): | class zstdrevlogcompressor: | ||||
def __init__(self, zstd, level=3): | def __init__(self, zstd, level=3): | ||||
# TODO consider omitting frame magic to save 4 bytes. | # TODO consider omitting frame magic to save 4 bytes. | ||||
# This writes content sizes into the frame header. That is | # This writes content sizes into the frame header. That is | ||||
# extra storage. But it allows a correct size memory allocation | # extra storage. But it allows a correct size memory allocation | ||||
# to hold the result. | # to hold the result. | ||||
self._cctx = zstd.ZstdCompressor(level=level) | self._cctx = zstd.ZstdCompressor(level=level) | ||||
self._dctx = zstd.ZstdDecompressor() | self._dctx = zstd.ZstdDecompressor() | ||||
self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE | self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE | ||||
def bundlecompressiontopics(): | def bundlecompressiontopics(): | ||||
"""Obtains a list of available bundle compressions for use in help.""" | """Obtains a list of available bundle compressions for use in help.""" | ||||
# help.makeitemsdocs() expects a dict of names to items with a .__doc__. | # help.makeitemsdocs() expects a dict of names to items with a .__doc__. | ||||
items = {} | items = {} | ||||
# We need to format the docstring. So use a dummy object/type to hold it | # We need to format the docstring. So use a dummy object/type to hold it | ||||
# rather than mutating the original. | # rather than mutating the original. | ||||
class docobject(object): | class docobject: | ||||
pass | pass | ||||
for name in compengines: | for name in compengines: | ||||
engine = compengines[name] | engine = compengines[name] | ||||
if not engine.available(): | if not engine.available(): | ||||
continue | continue | ||||
def readinto(self, b): | def readinto(self, b): | ||||
raise IOError(errno.EBADF, 'Bad file descriptor') | raise IOError(errno.EBADF, 'Bad file descriptor') | ||||
def write(self, b): | def write(self, b): | ||||
raise IOError(errno.EBADF, 'Bad file descriptor') | raise IOError(errno.EBADF, 'Bad file descriptor') | ||||
class LineBufferedWrapper(object): | class LineBufferedWrapper: | ||||
def __init__(self, orig): | def __init__(self, orig): | ||||
self.orig = orig | self.orig = orig | ||||
def __getattr__(self, attr): | def __getattr__(self, attr): | ||||
return getattr(self.orig, attr) | return getattr(self.orig, attr) | ||||
def write(self, s): | def write(self, s): | ||||
orig = self.orig | orig = self.orig | ||||
def unwrap_line_buffered(stream): | def unwrap_line_buffered(stream): | ||||
if isinstance(stream, LineBufferedWrapper): | if isinstance(stream, LineBufferedWrapper): | ||||
assert not isinstance(stream.orig, LineBufferedWrapper) | assert not isinstance(stream.orig, LineBufferedWrapper) | ||||
return stream.orig | return stream.orig | ||||
return stream | return stream | ||||
class WriteAllWrapper(object): | class WriteAllWrapper: | ||||
def __init__(self, orig): | def __init__(self, orig): | ||||
self.orig = orig | self.orig = orig | ||||
def __getattr__(self, attr): | def __getattr__(self, attr): | ||||
return getattr(self.orig, attr) | return getattr(self.orig, attr) | ||||
def write(self, s): | def write(self, s): | ||||
write1 = self.orig.write | write1 = self.orig.write | ||||
def explainexit(code): | def explainexit(code): | ||||
"""return a message describing a subprocess status | """return a message describing a subprocess status | ||||
(codes from kill are negative - not os.system/wait encoding)""" | (codes from kill are negative - not os.system/wait encoding)""" | ||||
if code >= 0: | if code >= 0: | ||||
return _(b"exited with status %d") % code | return _(b"exited with status %d") % code | ||||
return _(b"killed by signal %d") % -code | return _(b"killed by signal %d") % -code | ||||
class _pfile(object): | class _pfile: | ||||
"""File-like wrapper for a stream opened by subprocess.Popen()""" | """File-like wrapper for a stream opened by subprocess.Popen()""" | ||||
def __init__(self, proc, fp): | def __init__(self, proc, fp): | ||||
self._proc = proc | self._proc = proc | ||||
self._fp = fp | self._fp = fp | ||||
def close(self): | def close(self): | ||||
# unlike os.popen(), this returns an integer in subprocess coding | # unlike os.popen(), this returns an integer in subprocess coding |
f = author.find(b'<') | f = author.find(b'<') | ||||
if f != -1: | if f != -1: | ||||
return author[:f].strip(b' "').replace(b'\\"', b'"') | return author[:f].strip(b' "').replace(b'\\"', b'"') | ||||
f = author.find(b'@') | f = author.find(b'@') | ||||
return author[:f].replace(b'.', b' ') | return author[:f].replace(b'.', b' ') | ||||
@attr.s(hash=True) | @attr.s(hash=True) | ||||
class mailmapping(object): | class mailmapping: | ||||
"""Represents a username/email key or value in | """Represents a username/email key or value in | ||||
a mailmap file""" | a mailmap file""" | ||||
email = attr.ib() | email = attr.ib() | ||||
name = attr.ib(default=None) | name = attr.ib(default=None) | ||||
def _ismailmaplineinvalid(names, emails): | def _ismailmaplineinvalid(names, emails): |
try: | try: | ||||
return socket.getservbyname(pycompat.sysstr(port)) | return socket.getservbyname(pycompat.sysstr(port)) | ||||
except socket.error: | except socket.error: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"no port number associated with service '%s'") % port | _(b"no port number associated with service '%s'") % port | ||||
) | ) | ||||
class url(object): | class url: | ||||
r"""Reliable URL parser. | r"""Reliable URL parser. | ||||
This parses URLs and provides attributes for the following | This parses URLs and provides attributes for the following | ||||
components: | components: | ||||
<scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment> | <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment> | ||||
Missing components are set to None. The only exception is | Missing components are set to None. The only exception is | ||||
path.rawloc = b'%s#%s' % (base, path.branch) | path.rawloc = b'%s#%s' % (base, path.branch) | ||||
suboptions = subpath._all_sub_opts.copy() | suboptions = subpath._all_sub_opts.copy() | ||||
suboptions.update(path._own_sub_opts) | suboptions.update(path._own_sub_opts) | ||||
path._apply_suboptions(ui, suboptions) | path._apply_suboptions(ui, suboptions) | ||||
new_paths.append(path) | new_paths.append(path) | ||||
return new_paths | return new_paths | ||||
class path(object): | class path: | ||||
"""Represents an individual path and its configuration.""" | """Represents an individual path and its configuration.""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
ui=None, | ui=None, | ||||
name=None, | name=None, | ||||
rawloc=None, | rawloc=None, | ||||
suboptions=None, | suboptions=None, |
b"warning: copy source of '%s' not in parents of %s" | b"warning: copy source of '%s' not in parents of %s" | ||||
) | ) | ||||
WARN_NULLID_COPY_SOURCE = _( | WARN_NULLID_COPY_SOURCE = _( | ||||
b"warning: %s@%s: copy source revision is nullid %s:%s\n" | b"warning: %s@%s: copy source revision is nullid %s:%s\n" | ||||
) | ) | ||||
class verifier(object): | class verifier: | ||||
def __init__(self, repo, level=None): | def __init__(self, repo, level=None): | ||||
self.repo = repo.unfiltered() | self.repo = repo.unfiltered() | ||||
self.ui = repo.ui | self.ui = repo.ui | ||||
self.match = repo.narrowmatch() | self.match = repo.narrowmatch() | ||||
if level is None: | if level is None: | ||||
level = VERIFY_DEFAULT | level = VERIFY_DEFAULT | ||||
self._level = level | self._level = level | ||||
self.badrevs = set() | self.badrevs = set() |
if not checkandavoid(): | if not checkandavoid(): | ||||
# simply copy to change owner of path to get privilege to | # simply copy to change owner of path to get privilege to | ||||
# advance mtime (see issue5418) | # advance mtime (see issue5418) | ||||
util.rename(util.mktempcopy(path), path) | util.rename(util.mktempcopy(path), path) | ||||
checkandavoid() | checkandavoid() | ||||
class abstractvfs(object): | class abstractvfs: | ||||
"""Abstract base class; cannot be instantiated""" | """Abstract base class; cannot be instantiated""" | ||||
# default directory separator for vfs | # default directory separator for vfs | ||||
# | # | ||||
# Other vfs code always use `/` and this works fine because python file API | # Other vfs code always use `/` and this works fine because python file API | ||||
# abstract the use of `/` and make it work transparently. For consistency | # abstract the use of `/` and make it work transparently. For consistency | ||||
# vfs will always use `/` when joining. This avoid some confusion in | # vfs will always use `/` when joining. This avoid some confusion in | ||||
# encoded vfs (see issue6546) | # encoded vfs (see issue6546) | ||||
if mode not in (b'r', b'rb'): | if mode not in (b'r', b'rb'): | ||||
raise error.Abort(_(b'this vfs is read only')) | raise error.Abort(_(b'this vfs is read only')) | ||||
return self.vfs(path, mode, *args, **kw) | return self.vfs(path, mode, *args, **kw) | ||||
def join(self, path, *insidef): | def join(self, path, *insidef): | ||||
return self.vfs.join(path, *insidef) | return self.vfs.join(path, *insidef) | ||||
class closewrapbase(object): | class closewrapbase: | ||||
"""Base class of wrapper, which hooks closing | """Base class of wrapper, which hooks closing | ||||
Do not instantiate outside of the vfs layer. | Do not instantiate outside of the vfs layer. | ||||
""" | """ | ||||
def __init__(self, fh): | def __init__(self, fh): | ||||
object.__setattr__(self, '_origfh', fh) | object.__setattr__(self, '_origfh', fh) | ||||
def __exit__(self, exc_type, exc_value, exc_tb): | def __exit__(self, exc_type, exc_value, exc_tb): | ||||
self._closer.close(self._origfh) | self._closer.close(self._origfh) | ||||
def close(self): | def close(self): | ||||
self._closer.close(self._origfh) | self._closer.close(self._origfh) | ||||
class backgroundfilecloser(object): | class backgroundfilecloser: | ||||
"""Coordinates background closing of file handles on multiple threads.""" | """Coordinates background closing of file handles on multiple threads.""" | ||||
def __init__(self, ui, expectedcount=-1): | def __init__(self, ui, expectedcount=-1): | ||||
self._running = False | self._running = False | ||||
self._entered = False | self._entered = False | ||||
self._threads = [] | self._threads = [] | ||||
self._threadexception = None | self._threadexception = None | ||||
spawndetached = win32.spawndetached | spawndetached = win32.spawndetached | ||||
split = os.path.split | split = os.path.split | ||||
testpid = win32.testpid | testpid = win32.testpid | ||||
unlink = win32.unlink | unlink = win32.unlink | ||||
umask = 0o022 | umask = 0o022 | ||||
class mixedfilemodewrapper(object): | class mixedfilemodewrapper: | ||||
"""Wraps a file handle when it is opened in read/write mode. | """Wraps a file handle when it is opened in read/write mode. | ||||
fopen() and fdopen() on Windows have a specific-to-Windows requirement | fopen() and fdopen() on Windows have a specific-to-Windows requirement | ||||
that files opened with mode r+, w+, or a+ make a call to a file positioning | that files opened with mode r+, w+, or a+ make a call to a file positioning | ||||
function when switching between reads and writes. Without this extra call, | function when switching between reads and writes. Without this extra call, | ||||
Python will raise a not very intuitive "IOError: [Errno 0] Error." | Python will raise a not very intuitive "IOError: [Errno 0] Error." | ||||
This class wraps posixfile instances when the file is opened in read/write | This class wraps posixfile instances when the file is opened in read/write | ||||
def readlines(self, *args, **kwargs): | def readlines(self, *args, **kwargs): | ||||
if self._lastop == self.OPWRITE: | if self._lastop == self.OPWRITE: | ||||
self._noopseek() | self._noopseek() | ||||
object.__setattr__(self, '_lastop', self.OPREAD) | object.__setattr__(self, '_lastop', self.OPREAD) | ||||
return self._fp.readlines(*args, **kwargs) | return self._fp.readlines(*args, **kwargs) | ||||
class fdproxy(object): | class fdproxy: | ||||
"""Wraps osutil.posixfile() to override the name attribute to reflect the | """Wraps osutil.posixfile() to override the name attribute to reflect the | ||||
underlying file name. | underlying file name. | ||||
""" | """ | ||||
def __init__(self, name, fp): | def __init__(self, name, fp): | ||||
self.name = name | self.name = name | ||||
self._fp = fp | self._fp = fp | ||||
pw = pw[:-1] | pw = pw[:-1] | ||||
else: | else: | ||||
pw = pw + c | pw = pw + c | ||||
msvcrt.putwch(u'\r') # pytype: disable=module-attr | msvcrt.putwch(u'\r') # pytype: disable=module-attr | ||||
msvcrt.putwch(u'\n') # pytype: disable=module-attr | msvcrt.putwch(u'\n') # pytype: disable=module-attr | ||||
return encoding.unitolocal(pw) | return encoding.unitolocal(pw) | ||||
class winstdout(object): | class winstdout: | ||||
"""Some files on Windows misbehave. | """Some files on Windows misbehave. | ||||
When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | ||||
When writing too many bytes to a console at the same, a "Not enough space" | When writing too many bytes to a console at the same, a "Not enough space" | ||||
error may happen. Python 3 already works around that. | error may happen. Python 3 already works around that. | ||||
""" | """ | ||||
# Don't support groups on Windows for now | # Don't support groups on Windows for now | ||||
raise KeyError | raise KeyError | ||||
def isexec(f): | def isexec(f): | ||||
return False | return False | ||||
class cachestat(object): | class cachestat: | ||||
def __init__(self, path): | def __init__(self, path): | ||||
pass | pass | ||||
def cacheable(self): | def cacheable(self): | ||||
return False | return False | ||||
def lookupreg(key, valname=None, scope=None): | def lookupreg(key, valname=None, scope=None): |
if value & val: | if value & val: | ||||
flags.append(namemap.get(val, b'<unknown 0x%02x>' % val)) | flags.append(namemap.get(val, b'<unknown 0x%02x>' % val)) | ||||
val <<= 1 | val <<= 1 | ||||
return b'|'.join(flags) | return b'|'.join(flags) | ||||
@attr.s(slots=True) | @attr.s(slots=True) | ||||
class frameheader(object): | class frameheader: | ||||
"""Represents the data in a frame header.""" | """Represents the data in a frame header.""" | ||||
length = attr.ib() | length = attr.ib() | ||||
requestid = attr.ib() | requestid = attr.ib() | ||||
streamid = attr.ib() | streamid = attr.ib() | ||||
streamflags = attr.ib() | streamflags = attr.ib() | ||||
typeid = attr.ib() | typeid = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
@attr.s(slots=True, repr=False) | @attr.s(slots=True, repr=False) | ||||
class frame(object): | class frame: | ||||
"""Represents a parsed frame.""" | """Represents a parsed frame.""" | ||||
requestid = attr.ib() | requestid = attr.ib() | ||||
streamid = attr.ib() | streamid = attr.ib() | ||||
streamflags = attr.ib() | streamflags = attr.ib() | ||||
typeid = attr.ib() | typeid = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
payload = attr.ib() | payload = attr.ib() | ||||
yield stream.makeframe( | yield stream.makeframe( | ||||
requestid=requestid, | requestid=requestid, | ||||
typeid=FRAME_TYPE_TEXT_OUTPUT, | typeid=FRAME_TYPE_TEXT_OUTPUT, | ||||
flags=0, | flags=0, | ||||
payload=payload, | payload=payload, | ||||
) | ) | ||||
class bufferingcommandresponseemitter(object): | class bufferingcommandresponseemitter: | ||||
"""Helper object to emit command response frames intelligently. | """Helper object to emit command response frames intelligently. | ||||
Raw command response data is likely emitted in chunks much smaller | Raw command response data is likely emitted in chunks much smaller | ||||
than what can fit in a single frame. This class exists to buffer | than what can fit in a single frame. This class exists to buffer | ||||
chunks until enough data is available to fit in a single frame. | chunks until enough data is available to fit in a single frame. | ||||
TODO we'll need something like this when compression is supported. | TODO we'll need something like this when compression is supported. | ||||
So it might make sense to implement this functionality at the stream | So it might make sense to implement this functionality at the stream | ||||
encoded=True, | encoded=True, | ||||
) | ) | ||||
# TODO consider defining encoders/decoders using the util.compressionengine | # TODO consider defining encoders/decoders using the util.compressionengine | ||||
# mechanism. | # mechanism. | ||||
class identityencoder(object): | class identityencoder: | ||||
"""Encoder for the "identity" stream encoding profile.""" | """Encoder for the "identity" stream encoding profile.""" | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
pass | pass | ||||
def encode(self, data): | def encode(self, data): | ||||
return data | return data | ||||
def flush(self): | def flush(self): | ||||
return b'' | return b'' | ||||
def finish(self): | def finish(self): | ||||
return b'' | return b'' | ||||
class identitydecoder(object): | class identitydecoder: | ||||
"""Decoder for the "identity" stream encoding profile.""" | """Decoder for the "identity" stream encoding profile.""" | ||||
def __init__(self, ui, extraobjs): | def __init__(self, ui, extraobjs): | ||||
if extraobjs: | if extraobjs: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'identity decoder received unexpected additional values') | _(b'identity decoder received unexpected additional values') | ||||
) | ) | ||||
def decode(self, data): | def decode(self, data): | ||||
return data | return data | ||||
class zlibencoder(object): | class zlibencoder: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
import zlib | import zlib | ||||
self._zlib = zlib | self._zlib = zlib | ||||
self._compressor = zlib.compressobj() | self._compressor = zlib.compressobj() | ||||
def encode(self, data): | def encode(self, data): | ||||
return self._compressor.compress(data) | return self._compressor.compress(data) | ||||
def flush(self): | def flush(self): | ||||
# Z_SYNC_FLUSH doesn't reset compression context, which is | # Z_SYNC_FLUSH doesn't reset compression context, which is | ||||
# what we want. | # what we want. | ||||
return self._compressor.flush(self._zlib.Z_SYNC_FLUSH) | return self._compressor.flush(self._zlib.Z_SYNC_FLUSH) | ||||
def finish(self): | def finish(self): | ||||
res = self._compressor.flush(self._zlib.Z_FINISH) | res = self._compressor.flush(self._zlib.Z_FINISH) | ||||
self._compressor = None | self._compressor = None | ||||
return res | return res | ||||
class zlibdecoder(object): | class zlibdecoder: | ||||
def __init__(self, ui, extraobjs): | def __init__(self, ui, extraobjs): | ||||
import zlib | import zlib | ||||
if extraobjs: | if extraobjs: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'zlib decoder received unexpected additional values') | _(b'zlib decoder received unexpected additional values') | ||||
) | ) | ||||
self._decompressor = zlib.decompressobj() | self._decompressor = zlib.decompressobj() | ||||
def decode(self, data): | def decode(self, data): | ||||
return self._decompressor.decompress(data) | return self._decompressor.decompress(data) | ||||
class zstdbaseencoder(object): | class zstdbaseencoder: | ||||
def __init__(self, level): | def __init__(self, level): | ||||
from . import zstd | from . import zstd | ||||
self._zstd = zstd | self._zstd = zstd | ||||
cctx = zstd.ZstdCompressor(level=level) | cctx = zstd.ZstdCompressor(level=level) | ||||
self._compressor = cctx.compressobj() | self._compressor = cctx.compressobj() | ||||
def encode(self, data): | def encode(self, data): | ||||
return res | return res | ||||
class zstd8mbencoder(zstdbaseencoder): | class zstd8mbencoder(zstdbaseencoder): | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
super(zstd8mbencoder, self).__init__(3) | super(zstd8mbencoder, self).__init__(3) | ||||
class zstdbasedecoder(object): | class zstdbasedecoder: | ||||
def __init__(self, maxwindowsize): | def __init__(self, maxwindowsize): | ||||
from . import zstd | from . import zstd | ||||
dctx = zstd.ZstdDecompressor(max_window_size=maxwindowsize) | dctx = zstd.ZstdDecompressor(max_window_size=maxwindowsize) | ||||
self._decompressor = dctx.decompressobj() | self._decompressor = dctx.decompressobj() | ||||
def decode(self, data): | def decode(self, data): | ||||
return self._decompressor.decompress(data) | return self._decompressor.decompress(data) | ||||
STREAM_ENCODERS[b'zlib'] = (zlibencoder, zlibdecoder) | STREAM_ENCODERS[b'zlib'] = (zlibencoder, zlibdecoder) | ||||
STREAM_ENCODERS_ORDER.append(b'zlib') | STREAM_ENCODERS_ORDER.append(b'zlib') | ||||
STREAM_ENCODERS[b'identity'] = (identityencoder, identitydecoder) | STREAM_ENCODERS[b'identity'] = (identityencoder, identitydecoder) | ||||
STREAM_ENCODERS_ORDER.append(b'identity') | STREAM_ENCODERS_ORDER.append(b'identity') | ||||
class stream(object): | class stream: | ||||
"""Represents a logical unidirectional series of frames.""" | """Represents a logical unidirectional series of frames.""" | ||||
def __init__(self, streamid, active=False): | def __init__(self, streamid, active=False): | ||||
self.streamid = streamid | self.streamid = streamid | ||||
self._active = active | self._active = active | ||||
def makeframe(self, requestid, typeid, flags, payload): | def makeframe(self, requestid, typeid, flags, payload): | ||||
"""Create a frame to be sent out over this stream. | """Create a frame to be sent out over this stream. | ||||
) | ) | ||||
DEFAULT_PROTOCOL_SETTINGS = { | DEFAULT_PROTOCOL_SETTINGS = { | ||||
b'contentencodings': [b'identity'], | b'contentencodings': [b'identity'], | ||||
} | } | ||||
class serverreactor(object): | class serverreactor: | ||||
"""Holds state of a server handling frame-based protocol requests. | """Holds state of a server handling frame-based protocol requests. | ||||
This class is the "brain" of the unified frame-based protocol server | This class is the "brain" of the unified frame-based protocol server | ||||
component. While the protocol is stateless from the perspective of | component. While the protocol is stateless from the perspective of | ||||
requests/commands, something needs to track which frames have been | requests/commands, something needs to track which frames have been | ||||
received, what frames to expect, etc. This class is that thing. | received, what frames to expect, etc. This class is that thing. | ||||
Instances are modeled as a state machine of sorts. Instances are also | Instances are modeled as a state machine of sorts. Instances are also | ||||
else: | else: | ||||
self._state = b'errored' | self._state = b'errored' | ||||
return self._makeerrorresult(_(b'command data frame without flags')) | return self._makeerrorresult(_(b'command data frame without flags')) | ||||
def _onframeerrored(self, frame): | def _onframeerrored(self, frame): | ||||
return self._makeerrorresult(_(b'server already errored')) | return self._makeerrorresult(_(b'server already errored')) | ||||
class commandrequest(object): | class commandrequest: | ||||
"""Represents a request to run a command.""" | """Represents a request to run a command.""" | ||||
def __init__(self, requestid, name, args, datafh=None, redirect=None): | def __init__(self, requestid, name, args, datafh=None, redirect=None): | ||||
self.requestid = requestid | self.requestid = requestid | ||||
self.name = name | self.name = name | ||||
self.args = args | self.args = args | ||||
self.datafh = datafh | self.datafh = datafh | ||||
self.redirect = redirect | self.redirect = redirect | ||||
self.state = b'pending' | self.state = b'pending' | ||||
class clientreactor(object): | class clientreactor: | ||||
"""Holds state of a client issuing frame-based protocol requests. | """Holds state of a client issuing frame-based protocol requests. | ||||
This is like ``serverreactor`` but for client-side state. | This is like ``serverreactor`` but for client-side state. | ||||
Each instance is bound to the lifetime of a connection. For persistent | Each instance is bound to the lifetime of a connection. For persistent | ||||
connection transports using e.g. TCP sockets and speaking the raw | connection transports using e.g. TCP sockets and speaking the raw | ||||
framing protocol, there will be a single instance for the lifetime of | framing protocol, there will be a single instance for the lifetime of | ||||
the TCP socket. For transports where there are multiple discrete | the TCP socket. For transports where there are multiple discrete |
break | break | ||||
chunks.append(pycompat.bytesurl(v)) | chunks.append(pycompat.bytesurl(v)) | ||||
i += 1 | i += 1 | ||||
return b''.join(chunks) | return b''.join(chunks) | ||||
@interfaceutil.implementer(wireprototypes.baseprotocolhandler) | @interfaceutil.implementer(wireprototypes.baseprotocolhandler) | ||||
class httpv1protocolhandler(object): | class httpv1protocolhandler: | ||||
def __init__(self, req, ui, checkperm): | def __init__(self, req, ui, checkperm): | ||||
self._req = req | self._req = req | ||||
self._ui = ui | self._ui = ui | ||||
self._checkperm = checkperm | self._checkperm = checkperm | ||||
self._protocaps = None | self._protocaps = None | ||||
@property | @property | ||||
def name(self): | def name(self): | ||||
def _sshv1respondooberror(fout, ferr, rsp): | def _sshv1respondooberror(fout, ferr, rsp): | ||||
ferr.write(b'%s\n-\n' % rsp) | ferr.write(b'%s\n-\n' % rsp) | ||||
ferr.flush() | ferr.flush() | ||||
fout.write(b'\n') | fout.write(b'\n') | ||||
fout.flush() | fout.flush() | ||||
@interfaceutil.implementer(wireprototypes.baseprotocolhandler) | @interfaceutil.implementer(wireprototypes.baseprotocolhandler) | ||||
class sshv1protocolhandler(object): | class sshv1protocolhandler: | ||||
"""Handler for requests services via version 1 of SSH protocol.""" | """Handler for requests services via version 1 of SSH protocol.""" | ||||
def __init__(self, ui, fin, fout): | def __init__(self, ui, fin, fout): | ||||
self._ui = ui | self._ui = ui | ||||
self._fin = fin | self._fin = fin | ||||
self._fout = fout | self._fout = fout | ||||
self._protocaps = set() | self._protocaps = set() | ||||
break | break | ||||
else: | else: | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'unhandled ssh server state: %s' % state | b'unhandled ssh server state: %s' % state | ||||
) | ) | ||||
class sshserver(object): | class sshserver: | ||||
def __init__(self, ui, repo, logfh=None): | def __init__(self, ui, repo, logfh=None): | ||||
self._ui = ui | self._ui = ui | ||||
self._repo = repo | self._repo = repo | ||||
self._fin, self._fout = ui.protectfinout() | self._fin, self._fout = ui.protectfinout() | ||||
# Log write I/O to stdout and stderr if configured. | # Log write I/O to stdout and stderr if configured. | ||||
if logfh: | if logfh: | ||||
self._fout = util.makeloggingfileobject( | self._fout = util.makeloggingfileobject( |
}, | }, | ||||
b'http-v1': { | b'http-v1': { | ||||
b'transport': b'http', | b'transport': b'http', | ||||
b'version': 1, | b'version': 1, | ||||
}, | }, | ||||
} | } | ||||
class bytesresponse(object): | class bytesresponse: | ||||
"""A wire protocol response consisting of raw bytes.""" | """A wire protocol response consisting of raw bytes.""" | ||||
def __init__(self, data): | def __init__(self, data): | ||||
self.data = data | self.data = data | ||||
class ooberror(object): | class ooberror: | ||||
"""wireproto reply: failure of a batch of operation | """wireproto reply: failure of a batch of operation | ||||
Something failed during a batch call. The error message is stored in | Something failed during a batch call. The error message is stored in | ||||
`self.message`. | `self.message`. | ||||
""" | """ | ||||
def __init__(self, message): | def __init__(self, message): | ||||
self.message = message | self.message = message | ||||
class pushres(object): | class pushres: | ||||
"""wireproto reply: success with simple integer return | """wireproto reply: success with simple integer return | ||||
The call was successful and returned an integer contained in `self.res`. | The call was successful and returned an integer contained in `self.res`. | ||||
""" | """ | ||||
def __init__(self, res, output): | def __init__(self, res, output): | ||||
self.res = res | self.res = res | ||||
self.output = output | self.output = output | ||||
class pusherr(object): | class pusherr: | ||||
"""wireproto reply: failure | """wireproto reply: failure | ||||
The call failed. The `self.res` attribute contains the error message. | The call failed. The `self.res` attribute contains the error message. | ||||
""" | """ | ||||
def __init__(self, res, output): | def __init__(self, res, output): | ||||
self.res = res | self.res = res | ||||
self.output = output | self.output = output | ||||
class streamres(object): | class streamres: | ||||
"""wireproto reply: binary stream | """wireproto reply: binary stream | ||||
The call was successful and the result is a stream. | The call was successful and the result is a stream. | ||||
Accepts a generator containing chunks of data to be sent to the client. | Accepts a generator containing chunks of data to be sent to the client. | ||||
``prefer_uncompressed`` indicates that the data is expected to be | ``prefer_uncompressed`` indicates that the data is expected to be | ||||
uncompressable and that the stream should therefore use the ``none`` | uncompressable and that the stream should therefore use the ``none`` | ||||
engine. | engine. | ||||
""" | """ | ||||
def __init__(self, gen=None, prefer_uncompressed=False): | def __init__(self, gen=None, prefer_uncompressed=False): | ||||
self.gen = gen | self.gen = gen | ||||
self.prefer_uncompressed = prefer_uncompressed | self.prefer_uncompressed = prefer_uncompressed | ||||
class streamreslegacy(object): | class streamreslegacy: | ||||
"""wireproto reply: uncompressed binary stream | """wireproto reply: uncompressed binary stream | ||||
The call was successful and the result is a stream. | The call was successful and the result is a stream. | ||||
Accepts a generator containing chunks of data to be sent to the client. | Accepts a generator containing chunks of data to be sent to the client. | ||||
Like ``streamres``, but sends an uncompressed data for "version 1" clients | Like ``streamres``, but sends an uncompressed data for "version 1" clients | ||||
using the application/mercurial-0.1 media type. | using the application/mercurial-0.1 media type. | ||||
"""Validate that the client has permissions to perform a request. | """Validate that the client has permissions to perform a request. | ||||
The argument is the permission required to proceed. If the client | The argument is the permission required to proceed. If the client | ||||
doesn't have that permission, the exception should raise or abort | doesn't have that permission, the exception should raise or abort | ||||
in a protocol specific manner. | in a protocol specific manner. | ||||
""" | """ | ||||
class commandentry(object): | class commandentry: | ||||
"""Represents a declared wire protocol command.""" | """Represents a declared wire protocol command.""" | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
func, | func, | ||||
args=b'', | args=b'', | ||||
transports=None, | transports=None, | ||||
permission=b'push', | permission=b'push', | ||||
hint=_(b'usable compression engines: %s') | hint=_(b'usable compression engines: %s') | ||||
% b', '.sorted(validnames), # pytype: disable=attribute-error | % b', '.sorted(validnames), # pytype: disable=attribute-error | ||||
) | ) | ||||
return compengines | return compengines | ||||
@attr.s | @attr.s | ||||
class encodedresponse(object): | class encodedresponse: | ||||
"""Represents response data that is already content encoded. | """Represents response data that is already content encoded. | ||||
Wire protocol version 2 only. | Wire protocol version 2 only. | ||||
Commands typically emit Python objects that are encoded and sent over the | Commands typically emit Python objects that are encoded and sent over the | ||||
wire. If commands emit an object of this type, the encoding step is bypassed | wire. If commands emit an object of this type, the encoding step is bypassed | ||||
and the content from this object is used instead. | and the content from this object is used instead. | ||||
""" | """ | ||||
data = attr.ib() | data = attr.ib() | ||||
@attr.s | @attr.s | ||||
class alternatelocationresponse(object): | class alternatelocationresponse: | ||||
"""Represents a response available at an alternate location. | """Represents a response available at an alternate location. | ||||
Instances are sent in place of actual response objects when the server | Instances are sent in place of actual response objects when the server | ||||
is sending a "content redirect" response. | is sending a "content redirect" response. | ||||
Only compatible with wire protocol version 2. | Only compatible with wire protocol version 2. | ||||
""" | """ | ||||
url = attr.ib() | url = attr.ib() | ||||
mediatype = attr.ib() | mediatype = attr.ib() | ||||
size = attr.ib(default=None) | size = attr.ib(default=None) | ||||
fullhashes = attr.ib(default=None) | fullhashes = attr.ib(default=None) | ||||
fullhashseed = attr.ib(default=None) | fullhashseed = attr.ib(default=None) | ||||
serverdercerts = attr.ib(default=None) | serverdercerts = attr.ib(default=None) | ||||
servercadercerts = attr.ib(default=None) | servercadercerts = attr.ib(default=None) | ||||
@attr.s | @attr.s | ||||
class indefinitebytestringresponse(object): | class indefinitebytestringresponse: | ||||
"""Represents an object to be encoded to an indefinite length bytestring. | """Represents an object to be encoded to an indefinite length bytestring. | ||||
Instances are initialized from an iterable of chunks, with each chunk being | Instances are initialized from an iterable of chunks, with each chunk being | ||||
a bytes instance. | a bytes instance. | ||||
""" | """ | ||||
chunks = attr.ib() | chunks = attr.ib() |
# This looks like it will infinitely recurse. However, | # This looks like it will infinitely recurse. However, | ||||
# sendcommands() should modify __class__. This call serves as a check | # sendcommands() should modify __class__. This call serves as a check | ||||
# on that. | # on that. | ||||
return self.result(timeout) | return self.result(timeout) | ||||
@interfaceutil.implementer(repository.ipeercommandexecutor) | @interfaceutil.implementer(repository.ipeercommandexecutor) | ||||
class peerexecutor(object): | class peerexecutor: | ||||
def __init__(self, peer): | def __init__(self, peer): | ||||
self._peer = peer | self._peer = peer | ||||
self._sent = False | self._sent = False | ||||
self._closed = False | self._closed = False | ||||
self._calls = [] | self._calls = [] | ||||
self._futures = weakref.WeakSet() | self._futures = weakref.WeakSet() | ||||
self._responseexecutor = None | self._responseexecutor = None | ||||
self._responsef = None | self._responsef = None |
raise error.Abort(_(b'number of cpus must be an integer')) | raise error.Abort(_(b'number of cpus must be an integer')) | ||||
return min(max(countcpus(), 4), 32) | return min(max(countcpus(), 4), 32) | ||||
def ismainthread(): | def ismainthread(): | ||||
return threading.current_thread() == threading.main_thread() | return threading.current_thread() == threading.main_thread() | ||||
class _blockingreader(object): | class _blockingreader: | ||||
def __init__(self, wrapped): | def __init__(self, wrapped): | ||||
self._wrapped = wrapped | self._wrapped = wrapped | ||||
# Do NOT implement readinto() by making it delegate to | # Do NOT implement readinto() by making it delegate to | ||||
# _wrapped.readinto(), since that is unbuffered. The unpickler is fine | # _wrapped.readinto(), since that is unbuffered. The unpickler is fine | ||||
# with just read() and readline(), so we don't need to implement it. | # with just read() and readline(), so we don't need to implement it. | ||||
def readline(self): | def readline(self): |
def runcmd(cmd, env, cwd=None): | def runcmd(cmd, env, cwd=None): | ||||
p = subprocess.Popen( | p = subprocess.Popen( | ||||
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd | cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd | ||||
) | ) | ||||
out, err = p.communicate() | out, err = p.communicate() | ||||
return p.returncode, out, err | return p.returncode, out, err | ||||
class hgcommand(object): | class hgcommand: | ||||
def __init__(self, cmd, env): | def __init__(self, cmd, env): | ||||
self.cmd = cmd | self.cmd = cmd | ||||
self.env = env | self.env = env | ||||
def run(self, args): | def run(self, args): | ||||
cmd = self.cmd + args | cmd = self.cmd + args | ||||
returncode, out, err = runcmd(cmd, self.env) | returncode, out, err = runcmd(cmd, self.env) | ||||
err = filterhgerr(err) | err = filterhgerr(err) | ||||
except ValueError: | except ValueError: | ||||
pass | pass | ||||
cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler | cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler | ||||
except ImportError: | except ImportError: | ||||
# the cygwinccompiler package is not available on some Python | # the cygwinccompiler package is not available on some Python | ||||
# distributions like the ones from the optware project for Synology | # distributions like the ones from the optware project for Synology | ||||
# DiskStation boxes | # DiskStation boxes | ||||
class HackedMingw32CCompiler(object): | class HackedMingw32CCompiler: | ||||
pass | pass | ||||
if os.name == 'nt': | if os.name == 'nt': | ||||
# Allow compiler/linker flags to be added to Visual Studio builds. Passing | # Allow compiler/linker flags to be added to Visual Studio builds. Passing | ||||
# extra_link_args to distutils.extensions.Extension() doesn't have any | # extra_link_args to distutils.extensions.Extension() doesn't have any | ||||
# effect. | # effect. | ||||
from distutils import msvccompiler | from distutils import msvccompiler |
if ch == b'#': # comment | if ch == b'#': # comment | ||||
break | break | ||||
if _isname(ch): | if _isname(ch): | ||||
edges[getname(y, x)] += parents(y, x) | edges[getname(y, x)] += parents(y, x) | ||||
return dict(edges) | return dict(edges) | ||||
class simplefilectx(object): | class simplefilectx: | ||||
def __init__(self, path, data): | def __init__(self, path, data): | ||||
self._data = data | self._data = data | ||||
self._path = path | self._path = path | ||||
def data(self): | def data(self): | ||||
return self._data | return self._data | ||||
def filenode(self): | def filenode(self): |
class _httprequesthandler(httpserver.simplehttprequesthandler): | class _httprequesthandler(httpserver.simplehttprequesthandler): | ||||
def log_message(self, format, *args): | def log_message(self, format, *args): | ||||
httpserver.simplehttprequesthandler.log_message(self, format, *args) | httpserver.simplehttprequesthandler.log_message(self, format, *args) | ||||
sys.stderr.flush() | sys.stderr.flush() | ||||
class simplehttpservice(object): | class simplehttpservice: | ||||
def __init__(self, host, port): | def __init__(self, host, port): | ||||
self.address = (host, port) | self.address = (host, port) | ||||
def init(self): | def init(self): | ||||
self.httpd = simplehttpserver(self.address, _httprequesthandler) | self.httpd = simplehttpserver(self.address, _httprequesthandler) | ||||
def run(self): | def run(self): | ||||
self.httpd.serve_forever() | self.httpd.serve_forever() |
for elt in l: | for elt in l: | ||||
k, v = elt.split(b'=', 1) | k, v = elt.split(b'=', 1) | ||||
if v[0:1] == b'"' and v[-1:] == b'"': | if v[0:1] == b'"' and v[-1:] == b'"': | ||||
v = v[1:-1] | v = v[1:-1] | ||||
parsed[k] = v | parsed[k] = v | ||||
return parsed | return parsed | ||||
class digestauthserver(object): | class digestauthserver: | ||||
def __init__(self): | def __init__(self): | ||||
self._user_hashes = {} | self._user_hashes = {} | ||||
def gethashers(self): | def gethashers(self): | ||||
def _md5sum(x): | def _md5sum(x): | ||||
m = hashlib.md5() | m = hashlib.md5() | ||||
m.update(x) | m.update(x) | ||||
return node.hex(m.digest()) | return node.hex(m.digest()) |
import os | import os | ||||
import time | import time | ||||
class mocktime(object): | class mocktime: | ||||
def __init__(self, increment): | def __init__(self, increment): | ||||
self.time = 0 | self.time = 0 | ||||
self.increment = [float(s) for s in increment.split()] | self.increment = [float(s) for s in increment.split()] | ||||
self.pos = 0 | self.pos = 0 | ||||
def __call__(self): | def __call__(self): | ||||
self.time += self.increment[self.pos % len(self.increment)] | self.time += self.increment[self.pos % len(self.increment)] | ||||
self.pos += 1 | self.pos += 1 | ||||
return self.time | return self.time | ||||
def uisetup(ui): | def uisetup(ui): | ||||
time.time = mocktime(os.environ.get('MOCKTIME', '0.1')) | time.time = mocktime(os.environ.get('MOCKTIME', '0.1')) |
return p.decode('utf-8') | return p.decode('utf-8') | ||||
osenvironb = getattr(os, 'environb', None) | osenvironb = getattr(os, 'environb', None) | ||||
if osenvironb is None: | if osenvironb is None: | ||||
# Windows lacks os.environb, for instance. A proxy over the real thing | # Windows lacks os.environb, for instance. A proxy over the real thing | ||||
# instead of a copy allows the environment to be updated via bytes on | # instead of a copy allows the environment to be updated via bytes on | ||||
# all platforms. | # all platforms. | ||||
class environbytes(object): | class environbytes: | ||||
def __init__(self, strenv): | def __init__(self, strenv): | ||||
self.__len__ = strenv.__len__ | self.__len__ = strenv.__len__ | ||||
self.clear = strenv.clear | self.clear = strenv.clear | ||||
self._strenv = strenv | self._strenv = strenv | ||||
def __getitem__(self, k): | def __getitem__(self, k): | ||||
v = self._strenv.__getitem__(_bytes2sys(k)) | v = self._strenv.__getitem__(_bytes2sys(k)) | ||||
return _sys2bytes(v) | return _sys2bytes(v) | ||||
if f.endswith(b'.py'): | if f.endswith(b'.py'): | ||||
val /= 10.0 | val /= 10.0 | ||||
perf[f] = val / 1000.0 | perf[f] = val / 1000.0 | ||||
return perf[f] | return perf[f] | ||||
testdescs.sort(key=sortkey) | testdescs.sort(key=sortkey) | ||||
class TestRunner(object): | class TestRunner: | ||||
"""Holds context for executing tests. | """Holds context for executing tests. | ||||
Tests rely on a lot of state. This object holds it for them. | Tests rely on a lot of state. This object holds it for them. | ||||
""" | """ | ||||
# Programs required to run tests. | # Programs required to run tests. | ||||
REQUIREDTOOLS = [ | REQUIREDTOOLS = [ | ||||
b'diff', | b'diff', |
class simplestoreerror(error.StorageError): | class simplestoreerror(error.StorageError): | ||||
pass | pass | ||||
@interfaceutil.implementer(repository.irevisiondelta) | @interfaceutil.implementer(repository.irevisiondelta) | ||||
@attr.s(slots=True) | @attr.s(slots=True) | ||||
class simplestorerevisiondelta(object): | class simplestorerevisiondelta: | ||||
node = attr.ib() | node = attr.ib() | ||||
p1node = attr.ib() | p1node = attr.ib() | ||||
p2node = attr.ib() | p2node = attr.ib() | ||||
basenode = attr.ib() | basenode = attr.ib() | ||||
flags = attr.ib() | flags = attr.ib() | ||||
baserevisionsize = attr.ib() | baserevisionsize = attr.ib() | ||||
revision = attr.ib() | revision = attr.ib() | ||||
delta = attr.ib() | delta = attr.ib() | ||||
linknode = attr.ib(default=None) | linknode = attr.ib(default=None) | ||||
@interfaceutil.implementer(repository.iverifyproblem) | @interfaceutil.implementer(repository.iverifyproblem) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class simplefilestoreproblem(object): | class simplefilestoreproblem: | ||||
warning = attr.ib(default=None) | warning = attr.ib(default=None) | ||||
error = attr.ib(default=None) | error = attr.ib(default=None) | ||||
node = attr.ib(default=None) | node = attr.ib(default=None) | ||||
@interfaceutil.implementer(repository.ifilestorage) | @interfaceutil.implementer(repository.ifilestorage) | ||||
class filestorage(object): | class filestorage: | ||||
"""Implements storage for a tracked path. | """Implements storage for a tracked path. | ||||
Data is stored in the VFS in a directory corresponding to the tracked | Data is stored in the VFS in a directory corresponding to the tracked | ||||
path. | path. | ||||
Index data is stored in an ``index`` file using CBOR. | Index data is stored in an ``index`` file using CBOR. | ||||
Fulltext data is stored in files having names of the node. | Fulltext data is stored in files having names of the node. |
import itertools | import itertools | ||||
from mercurial import pycompat | from mercurial import pycompat | ||||
from hgext import absorb | from hgext import absorb | ||||
class simplefctx(object): | class simplefctx: | ||||
def __init__(self, content): | def __init__(self, content): | ||||
self.content = content | self.content = content | ||||
def data(self): | def data(self): | ||||
return self.content | return self.content | ||||
def insertreturns(x): | def insertreturns(x): |
ancs[i] = {i} | ancs[i] = {i} | ||||
if graph[i] == [nullrev]: | if graph[i] == [nullrev]: | ||||
continue | continue | ||||
for p in graph[i]: | for p in graph[i]: | ||||
ancs[i].update(ancs[p]) | ancs[i].update(ancs[p]) | ||||
return ancs | return ancs | ||||
class naiveincrementalmissingancestors(object): | class naiveincrementalmissingancestors: | ||||
def __init__(self, ancs, bases): | def __init__(self, ancs, bases): | ||||
self.ancs = ancs | self.ancs = ancs | ||||
self.bases = set(bases) | self.bases = set(bases) | ||||
def addbases(self, newbases): | def addbases(self, newbases): | ||||
self.bases.update(newbases) | self.bases.update(newbases) | ||||
def removeancestorsfrom(self, revs): | def removeancestorsfrom(self, revs): |
) | ) | ||||
def bprint(*bs): | def bprint(*bs): | ||||
print(*[pycompat.sysstr(b) for b in bs]) | print(*[pycompat.sysstr(b) for b in bs]) | ||||
# equivalent of repo.repository | # equivalent of repo.repository | ||||
class thing(object): | class thing: | ||||
def hello(self): | def hello(self): | ||||
return b"Ready." | return b"Ready." | ||||
# equivalent of localrepo.localrepository | # equivalent of localrepo.localrepository | ||||
class localthing(thing): | class localthing(thing): | ||||
def foo(self, one, two=None): | def foo(self, one, two=None): | ||||
if one: | if one: | ||||
.replace(b':,', b',') | .replace(b':,', b',') | ||||
.replace(b'::', b':') | .replace(b'::', b':') | ||||
) | ) | ||||
# server side | # server side | ||||
# equivalent of wireproto's global functions | # equivalent of wireproto's global functions | ||||
class server(object): | class server: | ||||
def __init__(self, local): | def __init__(self, local): | ||||
self.local = local | self.local = local | ||||
def _call(self, name, args): | def _call(self, name, args): | ||||
args = dict(arg.split(b'=', 1) for arg in args) | args = dict(arg.split(b'=', 1) for arg in args) | ||||
return getattr(self, name)(**args) | return getattr(self, name)(**args) | ||||
def perform(self, req): | def perform(self, req): |
for attr in sorted(public - allowed): | for attr in sorted(public - allowed): | ||||
print( | print( | ||||
'public attribute not declared in interfaces: %s.%s' | 'public attribute not declared in interfaces: %s.%s' | ||||
% (o.__class__.__name__, attr) | % (o.__class__.__name__, attr) | ||||
) | ) | ||||
# Facilitates testing localpeer. | # Facilitates testing localpeer. | ||||
class dummyrepo(object): | class dummyrepo: | ||||
def __init__(self): | def __init__(self): | ||||
self.ui = uimod.ui() | self.ui = uimod.ui() | ||||
self._wanted_sidedata = set() | self._wanted_sidedata = set() | ||||
def filtered(self, name): | def filtered(self, name): | ||||
pass | pass | ||||
def _restrictcapabilities(self, caps): | def _restrictcapabilities(self, caps): | ||||
pass | pass | ||||
class dummyopener(object): | class dummyopener: | ||||
handlers = [] | handlers = [] | ||||
# Facilitates testing sshpeer without requiring a server. | # Facilitates testing sshpeer without requiring a server. | ||||
class badpeer(httppeer.httppeer): | class badpeer(httppeer.httppeer): | ||||
def __init__(self): | def __init__(self): | ||||
super(badpeer, self).__init__( | super(badpeer, self).__init__( | ||||
None, None, None, dummyopener(), None, None | None, None, None, dummyopener(), None, None | ||||
) | ) | ||||
self.badattribute = True | self.badattribute = True | ||||
def badmethod(self): | def badmethod(self): | ||||
pass | pass | ||||
class dummypipe(object): | class dummypipe: | ||||
def close(self): | def close(self): | ||||
pass | pass | ||||
@property | @property | ||||
def closed(self): | def closed(self): | ||||
pass | pass | ||||
def getid(wrapper): | def getid(wrapper): | ||||
return getattr(wrapper, 'x', '-') | return getattr(wrapper, 'x', '-') | ||||
wrappers = [genwrapper(i) for i in range(5)] | wrappers = [genwrapper(i) for i in range(5)] | ||||
class dummyclass(object): | class dummyclass: | ||||
def getstack(self): | def getstack(self): | ||||
return ['orig'] | return ['orig'] | ||||
dummy = dummyclass() | dummy = dummyclass() | ||||
def batchwrap(wrappers): | def batchwrap(wrappers): | ||||
# Bad programmer forgets to unwrap the function, but the context | # Bad programmer forgets to unwrap the function, but the context | ||||
# managers still unwrap their wrappings. | # managers still unwrap their wrappings. | ||||
extensions.wrapfunction(dummy, 'getstack', wrappers[2]) | extensions.wrapfunction(dummy, 'getstack', wrappers[2]) | ||||
print('context manager', dummy.getstack()) | print('context manager', dummy.getstack()) | ||||
print('context manager', dummy.getstack()) | print('context manager', dummy.getstack()) | ||||
print('context manager', dummy.getstack()) | print('context manager', dummy.getstack()) | ||||
# Wrap callable object which has no __name__ | # Wrap callable object which has no __name__ | ||||
class callableobj(object): | class callableobj: | ||||
def __call__(self): | def __call__(self): | ||||
return ['orig'] | return ['orig'] | ||||
dummy.cobj = callableobj() | dummy.cobj = callableobj() | ||||
extensions.wrapfunction(dummy, 'cobj', wrappers[0]) | extensions.wrapfunction(dummy, 'cobj', wrappers[0]) | ||||
print('wrap callable object', dummy.cobj()) | print('wrap callable object', dummy.cobj()) |
# two files should be the same | # two files should be the same | ||||
ensure(len({util.readfile(p) for p in [path, path2]}) == 1) | ensure(len({util.readfile(p) for p in [path, path2]}) == 1) | ||||
os.unlink(path) | os.unlink(path) | ||||
os.unlink(path2) | os.unlink(path2) | ||||
class fakefctx(object): | class fakefctx: | ||||
def __init__(self, node, path=None): | def __init__(self, node, path=None): | ||||
self._node = node | self._node = node | ||||
self._path = path | self._path = path | ||||
def node(self): | def node(self): | ||||
return self._node | return self._node | ||||
def path(self): | def path(self): |
util, | util, | ||||
vfs as vfsmod, | vfs as vfsmod, | ||||
) | ) | ||||
if pycompat.ispy3: | if pycompat.ispy3: | ||||
xrange = range | xrange = range | ||||
class fakerepo(object): | class fakerepo: | ||||
def __init__(self): | def __init__(self): | ||||
self._filecache = {} | self._filecache = {} | ||||
class fakevfs(object): | class fakevfs: | ||||
def join(self, p): | def join(self, p): | ||||
return p | return p | ||||
vfs = fakevfs() | vfs = fakevfs() | ||||
def unfiltered(self): | def unfiltered(self): | ||||
return self | return self | ||||
# Tests to ensure that sha1dc.sha1 is exactly a drop-in for | # Tests to ensure that sha1dc.sha1 is exactly a drop-in for | ||||
# hashlib.sha1 for our needs. | # hashlib.sha1 for our needs. | ||||
import hashlib | import hashlib | ||||
import unittest | import unittest | ||||
import silenttestrunner | import silenttestrunner | ||||
try: | try: | ||||
from mercurial.thirdparty import sha1dc | from mercurial.thirdparty import sha1dc | ||||
except ImportError: | except ImportError: | ||||
sha1dc = None | sha1dc = None | ||||
class hashertestsbase(object): | class hashertestsbase: | ||||
def test_basic_hash(self): | def test_basic_hash(self): | ||||
h = self.hasher() | h = self.hasher() | ||||
h.update(b'foo') | h.update(b'foo') | ||||
self.assertEqual( | self.assertEqual( | ||||
'0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33', h.hexdigest() | '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33', h.hexdigest() | ||||
) | ) | ||||
h.update(b'bar') | h.update(b'bar') | ||||
self.assertEqual( | self.assertEqual( |
# to be earlier | # to be earlier | ||||
self._pidoffset = pidoffset | self._pidoffset = pidoffset | ||||
super(lockwrapper, self).__init__(*args, **kwargs) | super(lockwrapper, self).__init__(*args, **kwargs) | ||||
def _getpid(self): | def _getpid(self): | ||||
return super(lockwrapper, self)._getpid() + self._pidoffset | return super(lockwrapper, self)._getpid() + self._pidoffset | ||||
class teststate(object): | class teststate: | ||||
def __init__(self, testcase, dir, pidoffset=0): | def __init__(self, testcase, dir, pidoffset=0): | ||||
self._testcase = testcase | self._testcase = testcase | ||||
self._acquirecalled = False | self._acquirecalled = False | ||||
self._releasecalled = False | self._releasecalled = False | ||||
self._postreleasecalled = False | self._postreleasecalled = False | ||||
self.vfs = vfsmod.vfs(dir, audit=False) | self.vfs = vfsmod.vfs(dir, audit=False) | ||||
self._pidoffset = pidoffset | self._pidoffset = pidoffset | ||||
xrange(200001), | xrange(200001), | ||||
itertools.cycle((HASH_1, HASH_2)), | itertools.cycle((HASH_1, HASH_2)), | ||||
itertools.cycle((b'', b'x', b'l')), | itertools.cycle((b'', b'x', b'l')), | ||||
) | ) | ||||
) | ) | ||||
) | ) | ||||
class basemanifesttests(object): | class basemanifesttests: | ||||
def parsemanifest(self, text): | def parsemanifest(self, text): | ||||
raise NotImplementedError('parsemanifest not implemented by test case') | raise NotImplementedError('parsemanifest not implemented by test case') | ||||
def testEmptyManifest(self): | def testEmptyManifest(self): | ||||
m = self.parsemanifest(20, EMTPY_MANIFEST) | m = self.parsemanifest(20, EMTPY_MANIFEST) | ||||
self.assertEqual(0, len(m)) | self.assertEqual(0, len(m)) | ||||
self.assertEqual([], list(m)) | self.assertEqual([], list(m)) | ||||
) | ) | ||||
from hgext.remotefilelog import ( | from hgext.remotefilelog import ( | ||||
basepack, | basepack, | ||||
constants, | constants, | ||||
datapack, | datapack, | ||||
) | ) | ||||
class datapacktestsbase(object): | class datapacktestsbase: | ||||
def __init__(self, datapackreader, paramsavailable): | def __init__(self, datapackreader, paramsavailable): | ||||
self.datapackreader = datapackreader | self.datapackreader = datapackreader | ||||
self.paramsavailable = paramsavailable | self.paramsavailable = paramsavailable | ||||
def setUp(self): | def setUp(self): | ||||
self.tempdirs = [] | self.tempdirs = [] | ||||
def tearDown(self): | def tearDown(self): |
from mercurial.revlogutils import ( | from mercurial.revlogutils import ( | ||||
constants, | constants, | ||||
deltas, | deltas, | ||||
flagutil, | flagutil, | ||||
) | ) | ||||
class _NoTransaction(object): | class _NoTransaction: | ||||
"""transaction like object to update the nodemap outside a transaction""" | """transaction like object to update the nodemap outside a transaction""" | ||||
def __init__(self): | def __init__(self): | ||||
self._postclose = {} | self._postclose = {} | ||||
def addpostclose(self, callback_id, callback_func): | def addpostclose(self, callback_id, callback_func): | ||||
self._postclose[callback_id] = callback_func | self._postclose[callback_id] = callback_func | ||||
If optimaldelta is True, use optimized delta parent, so the destination | If optimaldelta is True, use optimized delta parent, so the destination | ||||
revlog could probably reuse it. Otherwise it builds sub-optimal delta, and | revlog could probably reuse it. Otherwise it builds sub-optimal delta, and | ||||
the destination revlog needs more work to use it. | the destination revlog needs more work to use it. | ||||
This exercises some revlog.addgroup (and revlog._addrevision(text=None)) | This exercises some revlog.addgroup (and revlog._addrevision(text=None)) | ||||
code path, which is not covered by "appendrev" alone. | code path, which is not covered by "appendrev" alone. | ||||
""" | """ | ||||
class dummychangegroup(object): | class dummychangegroup: | ||||
@staticmethod | @staticmethod | ||||
def deltachunk(pnode): | def deltachunk(pnode): | ||||
pnode = pnode or rlog.nullid | pnode = pnode or rlog.nullid | ||||
parentrev = rlog.rev(pnode) | parentrev = rlog.rev(pnode) | ||||
r = parentrev + 1 | r = parentrev + 1 | ||||
if r >= len(rlog): | if r >= len(rlog): | ||||
return {} | return {} | ||||
if optimaldelta: | if optimaldelta: |
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F' | b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F' | ||||
b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01' | b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01' | ||||
b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1' | b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1' | ||||
b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00' | b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00' | ||||
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00' | b'\x00\x00\x00\x00\x00\x00\x00\x00\x00' | ||||
) | ) | ||||
class fakechangelog(object): | class fakechangelog: | ||||
def __init__(self, idx): | def __init__(self, idx): | ||||
self.index = idx | self.index = idx | ||||
class fakerepo(object): | class fakerepo: | ||||
def __init__(self, idx): | def __init__(self, idx): | ||||
"""Just make so that self.changelog.index is the given idx.""" | """Just make so that self.changelog.index is the given idx.""" | ||||
self.changelog = fakechangelog(idx) | self.changelog = fakechangelog(idx) | ||||
@unittest.skipIf( | @unittest.skipIf( | ||||
PartialDiscovery is None or cparsers is None, | PartialDiscovery is None or cparsers is None, | ||||
"rustext or the C Extension parsers module " | "rustext or the C Extension parsers module " |
import unittest | import unittest | ||||
import silenttestrunner | import silenttestrunner | ||||
from mercurial import ( | from mercurial import ( | ||||
error, | error, | ||||
scmutil, | scmutil, | ||||
) | ) | ||||
class mockfile(object): | class mockfile: | ||||
def __init__(self, name, fs): | def __init__(self, name, fs): | ||||
self.name = name | self.name = name | ||||
self.fs = fs | self.fs = fs | ||||
def __enter__(self): | def __enter__(self): | ||||
return self | return self | ||||
def __exit__(self, *args, **kwargs): | def __exit__(self, *args, **kwargs): | ||||
pass | pass | ||||
def write(self, text): | def write(self, text): | ||||
self.fs.contents[self.name] = text | self.fs.contents[self.name] = text | ||||
def read(self): | def read(self): | ||||
return self.fs.contents[self.name] | return self.fs.contents[self.name] | ||||
class mockvfs(object): | class mockvfs: | ||||
def __init__(self): | def __init__(self): | ||||
self.contents = {} | self.contents = {} | ||||
def read(self, path): | def read(self, path): | ||||
return mockfile(path, self).read() | return mockfile(path, self).read() | ||||
def readlines(self, path): | def readlines(self, path): | ||||
# lines need to contain the trailing '\n' to mock the real readlines | # lines need to contain the trailing '\n' to mock the real readlines |
def mockserver(inbytes): | def mockserver(inbytes): | ||||
ui = mockui(inbytes) | ui = mockui(inbytes) | ||||
repo = mockrepo(ui) | repo = mockrepo(ui) | ||||
return wireprotoserver.sshserver(ui, repo) | return wireprotoserver.sshserver(ui, repo) | ||||
class mockrepo(object): | class mockrepo: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
class mockui(object): | class mockui: | ||||
def __init__(self, inbytes): | def __init__(self, inbytes): | ||||
self.fin = io.BytesIO(inbytes) | self.fin = io.BytesIO(inbytes) | ||||
self.fout = io.BytesIO() | self.fout = io.BytesIO() | ||||
self.ferr = io.BytesIO() | self.ferr = io.BytesIO() | ||||
def protectfinout(self): | def protectfinout(self): | ||||
return self.fin, self.fout | return self.fin, self.fout | ||||
def restorefinout(self, fin, fout): | def restorefinout(self, fin, fout): | ||||
pass | pass | ||||
if __name__ == '__main__': | if __name__ == '__main__': | ||||
# Don't call into msvcrt to set BytesIO to binary mode | # Don't call into msvcrt to set BytesIO to binary mode | ||||
procutil.setbinary = lambda fp: True | procutil.setbinary = lambda fp: True | ||||
silenttestrunner.main(__name__) | silenttestrunner.main(__name__) |
import sys | import sys | ||||
from mercurial import ( | from mercurial import ( | ||||
error, | error, | ||||
pycompat, | pycompat, | ||||
ui as uimod, | ui as uimod, | ||||
util, | util, | ||||
wireprototypes, | wireprototypes, | ||||
wireprotov1peer, | wireprotov1peer, | ||||
wireprotov1server, | wireprotov1server, | ||||
) | ) | ||||
from mercurial.utils import stringutil | from mercurial.utils import stringutil | ||||
stringio = util.stringio | stringio = util.stringio | ||||
class proto(object): | class proto: | ||||
def __init__(self, args): | def __init__(self, args): | ||||
self.args = args | self.args = args | ||||
self.name = 'dummyproto' | self.name = 'dummyproto' | ||||
def getargs(self, spec): | def getargs(self, spec): | ||||
args = self.args | args = self.args | ||||
args.setdefault(b'*', {}) | args.setdefault(b'*', {}) | ||||
names = spec.split() | names = spec.split() | ||||
def _callstream(self, cmd, **args): | def _callstream(self, cmd, **args): | ||||
return stringio(self._call(cmd, **args)) | return stringio(self._call(cmd, **args)) | ||||
@wireprotov1peer.batchable | @wireprotov1peer.batchable | ||||
def greet(self, name): | def greet(self, name): | ||||
return {b'name': mangle(name)}, unmangle | return {b'name': mangle(name)}, unmangle | ||||
class serverrepo(object): | class serverrepo: | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
def greet(self, name): | def greet(self, name): | ||||
return b"Hello, " + name | return b"Hello, " + name | ||||
def filtered(self, name): | def filtered(self, name): | ||||
return self | return self |
) | ) | ||||
configitem( | configitem( | ||||
b'badserver', | b'badserver', | ||||
b'close-before-accept', | b'close-before-accept', | ||||
default=False, | default=False, | ||||
) | ) | ||||
class ConditionTracker(object): | class ConditionTracker: | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
close_after_recv_bytes, | close_after_recv_bytes, | ||||
close_after_recv_patterns, | close_after_recv_patterns, | ||||
close_after_send_bytes, | close_after_send_bytes, | ||||
close_after_send_patterns, | close_after_send_patterns, | ||||
): | ): | ||||
self._all_close_after_recv_bytes = close_after_recv_bytes | self._all_close_after_recv_bytes = close_after_recv_bytes | ||||
# This is the easiest way to abort the current request. | # This is the easiest way to abort the current request. | ||||
raise Exception('connection closed after receiving N bytes') | raise Exception('connection closed after receiving N bytes') | ||||
return result | return result | ||||
# We can't adjust __class__ on a socket instance. So we define a proxy type. | # We can't adjust __class__ on a socket instance. So we define a proxy type. | ||||
class socketproxy(object): | class socketproxy: | ||||
__slots__ = ('_orig', '_logfp', '_cond') | __slots__ = ('_orig', '_logfp', '_cond') | ||||
def __init__(self, obj, logfp, condition_tracked): | def __init__(self, obj, logfp, condition_tracked): | ||||
object.__setattr__(self, '_orig', obj) | object.__setattr__(self, '_orig', obj) | ||||
object.__setattr__(self, '_logfp', logfp) | object.__setattr__(self, '_logfp', logfp) | ||||
object.__setattr__(self, '_cond', condition_tracked) | object.__setattr__(self, '_cond', condition_tracked) | ||||
def __getattribute__(self, name): | def __getattribute__(self, name): | ||||
cond = object.__getattribute__(self, '_cond') | cond = object.__getattribute__(self, '_cond') | ||||
return cond.forward_write(self, 'sendall', data, flags) | return cond.forward_write(self, 'sendall', data, flags) | ||||
def _cond_close(self): | def _cond_close(self): | ||||
object.__getattribute__(self, '_orig').shutdown(socket.SHUT_RDWR) | object.__getattribute__(self, '_orig').shutdown(socket.SHUT_RDWR) | ||||
# We can't adjust __class__ on socket._fileobject, so define a proxy. | # We can't adjust __class__ on socket._fileobject, so define a proxy. | ||||
class fileobjectproxy(object): | class fileobjectproxy: | ||||
__slots__ = ('_orig', '_logfp', '_cond') | __slots__ = ('_orig', '_logfp', '_cond') | ||||
def __init__(self, obj, logfp, condition_tracked): | def __init__(self, obj, logfp, condition_tracked): | ||||
object.__setattr__(self, '_orig', obj) | object.__setattr__(self, '_orig', obj) | ||||
object.__setattr__(self, '_logfp', logfp) | object.__setattr__(self, '_logfp', logfp) | ||||
object.__setattr__(self, '_cond', condition_tracked) | object.__setattr__(self, '_cond', condition_tracked) | ||||
def __getattribute__(self, name): | def __getattribute__(self, name): |
Here a py2 compat fix in case you want to follow-up