black 22.1.0 is the first non-beta release of black. I think it makes
sense for us to adopt this version.
This commit blackens the repo with version 22.1.0.
skip-blame: formatting only changes with black
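For reference, the most common 22.1.0 change in this diff is the new power-operator hugging. A minimal before/after sketch (the variable names are made up, not from the patch):

    # black 21.x and earlier: spaces around ** unconditionally.
    chunksize = 2 ** 20

    # black 22.1.0: ** hugs "simple" operands (names, numeric literals,
    # attribute access).
    chunksize = 2**20

    # A non-simple operand such as a call keeps the spaces:
    limit = 2 ** compute_exponent()  # compute_exponent is hypothetical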
Reviewers: Alphare, hg-reviewers
Alphare: FYI, I'm updating the Heptapod CI to use this version of black as well. This might be a problem on stable, since we'd need a different base CI image, but looking into pinning images was a long time coming anyway. I'll have a look at the rest of the patches in the meantime.
indygreg: In D12218#187693, @Alphare wrote:
> FYI, I'm updating the Heptapod CI to use this version of black as well. [...]

Oh, was that in the repo? Did I miss it?

Regarding stable, this series is small enough, and it is early enough in the release cycle, that we could probably apply it to stable with minimal effort. The sooner we do that the better, as there is bound to be churn on default soon.
Alphare: In D12218#187833, @indygreg wrote:
>> FYI, I'm updating the Heptapod CI to use this version of black as well. [...]
> Oh, was that in the repo? Did I miss it?

It's in a separate repo; I started the upgrade in response to this patch, so no worries.

> Regarding stable, this series is small enough, and it is early enough in the release cycle, that we could probably apply it to stable with minimal effort. The sooner we do that the better, as there is bound to be churn on default soon.

I think this is part of a deeper issue with how we manage our images and backwards compatibility in the Heptapod CI. We need a small amount of work to make sure that kind of upgrade goes well in the future (we're bound to upgrade Rust, black, etc. again). This series and the one that depends on it will have to wait until early next week, when that work is done, so that we can properly and cleanly address the root cause.

I'll still continue to review the patches so that any needed back-and-forth can happen. I'm marking this as "changes requested" only to flag the issue above, so as not to break the CI.
Commit | Parents | Author | Summary | Date
---|---|---|---|---
36d2f461a53a | 7b068abe4aa2 | Gregory Szorc | | Feb 19 2022, 7:39 PM
Status | Author | Revision
---|---|---
Needs Review | indygreg | D12220 tests: require black 22.1.0
Needs Review | indygreg |
Needs Revision | indygreg | D12218 black: blacken with 22.1.0
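D12220 in the stack above makes the test suite require black 22.1.0. A rough sketch of such a version gate (hedged: this is not the actual hghave check, and the function name is made up):

    import re
    import subprocess


    def has_black_22_1():
        """Best-effort check that the black on PATH is at least 22.1."""
        try:
            out = subprocess.run(
                ['black', '--version'],
                capture_output=True,
                text=True,
                check=True,
            ).stdout
        except (OSError, subprocess.CalledProcessError):
            return False
        m = re.search(r'(\d+)\.(\d+)', out)
        return m is not None and (int(m.group(1)), int(m.group(2))) >= (22, 1)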
            coldelta += 1
            continue

        # This looks like a function call.
        if t.type == token.NAME and _isop(i + 1, '('):
            fn = t.string

            # *attr() builtins don't accept byte strings to 2nd argument.
-            if (
-                fn
-                in (
-                    'getattr',
-                    'setattr',
-                    'hasattr',
-                    'safehasattr',
-                    'wrapfunction',
-                    'wrapclass',
-                    'addattr',
-                )
-                and (opts['allow-attr-methods'] or not _isop(i - 1, '.'))
-            ):
+            if fn in (
+                'getattr',
+                'setattr',
+                'hasattr',
+                'safehasattr',
+                'wrapfunction',
+                'wrapclass',
+                'addattr',
+            ) and (opts['allow-attr-methods'] or not _isop(i - 1, '.')):
                arg1idx = _findargnofcall(1)
                if arg1idx is not None:
                    _ensuresysstr(arg1idx)

            # .encode() and .decode() on str/bytes/unicode don't accept
            # byte strings on Python 3.
            elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                for argn in range(2):
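The hunk above also shows the second big 22.1.0 change: black no longer wraps a multi-line condition in an extra layer of parentheses, and instead hugs the bracketed operand. The same rewrite on a made-up snippet:

    name, allowed = 'alpha', True

    # black 21.x emitted:
    # if (
    #     name
    #     in (
    #         'alpha',
    #         'beta',
    #     )
    #     and allowed
    # ):
    # black 22.1.0 emits:
    if name in (
        'alpha',
        'beta',
    ) and allowed:
        print('matched')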
if sys.version_info[0] > 2:

    def mkstr(b):
        if isinstance(b, str):
            return b
        return b.decode('utf8')

else:
    mkstr = lambda x: x


def main(args):
    for f in args:
        sect = b''
        prevname = b''
if sys.version_info[0] < 3:

    class py2reprhack(object):
        def __repr__(self):
            """Py2 calls __repr__ for `bytes(foo)`, forward to __bytes__"""
            return self.__bytes__()

else:

    class py2reprhack(object):
        """Not needed on py3."""


class deltafrag(py2reprhack):
    def __init__(self, start, end, data):
    stderr = sys.stderr.buffer
    stringio = io.BytesIO

    def bprint(*args):
        # remove b'' as well for ease of test migration
        pargs = [re.sub(br'''\bb(['"])''', br'\1', b'%s' % a) for a in args]
        stdout.write(b' '.join(pargs) + b'\n')

else:
    import cStringIO

    stdout = sys.stdout
    stderr = sys.stderr
    stringio = cStringIO.StringIO
    bprint = print

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)

except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)

def identity(a):

    # been available since 3.1 (or 75a96326cecb)
    _command = command

    def command(name, options=(), synopsis=None, norepo=False):
        if norepo:
            commands.norepo += b' %s' % b' '.join(parsealiases(name))
        return _command(name, list(options), synopsis)

else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis

    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
-    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
+    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
-    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
+    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')

@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

-    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
+    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
    def test_stupidly_large_output_buffer(self):
        cctx = zstd.ZstdCompressor(write_content_size=False)
        compressed = cctx.compress(b"foobar" * 256)
        dctx = zstd.ZstdDecompressor()

        # Will get OverflowError on some Python distributions that can't
        # handle really large integers.
        with self.assertRaises((MemoryError, OverflowError)):
-            dctx.decompress(compressed, max_output_size=2 ** 62)
+            dctx.decompress(compressed, max_output_size=2**62)

    def test_dictionary(self):
        samples = []
        for i in range(128):
            samples.append(b"foo" * 64)
            samples.append(b"bar" * 64)
            samples.append(b"foobar" * 64)

        with open(__file__, "rb") as fh:
            source = fh.read()

        # If we write a content size, the decompressor engages single pass
        # mode and the window size doesn't come into play.
        cctx = zstd.ZstdCompressor(write_content_size=False)
        frame = cctx.compress(source)

-        dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)
+        dctx = zstd.ZstdDecompressor(max_window_size=2**zstd.WINDOWLOG_MIN)

        with self.assertRaisesRegex(
            zstd.ZstdError,
            "decompression error: Frame requires too much memory",
        ):
            dctx.decompress(frame, max_output_size=len(source))
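The two tests above exercise the python-zstandard knobs that bound decompression when no content size is stored in the frame. A minimal round-trip sketch against the same public API (assuming the standalone zstandard package; the payload is arbitrary):

    import zstandard as zstd

    data = b'foobar' * 256

    # Omit the content size so the decompressor cannot preallocate from it.
    cctx = zstd.ZstdCompressor(write_content_size=False)
    frame = cctx.compress(data)

    # Without a stored content size, decompress() requires an output bound.
    dctx = zstd.ZstdDecompressor()
    assert dctx.decompress(frame, max_output_size=len(data)) == data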
        order = int(math.log(factor)) + 1
        while math.log(factor) > 1:
            factor //= 0
        return 'x%ix%i' % (factor, order)


def formattiming(value):
    """format a value to strictly 8 char, dropping some precision if needed"""
-    if value < 10 ** 7:
+    if value < 10**7:
        return ('%.6f' % value)[:8]
    else:
        # value is HUGE very unlikely to happen (4+ month run)
        return '%i' % value

    _marker = object()

    def sysstr(s):
        if isinstance(s, builtins.str):
            return s
        return s.decode('latin-1')

    def opentext(f):
        return open(f, 'r')

else:
    bytestr = str
    sysstr = identity
    opentext = open

    def b2s(x):
        print(
            "[%s] %s"
            % (
                time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()),
                fmt % args[:],
            )
        )

else:

    def log(fmt, *args):
        pass


def _win32_strerror(err):
    """expand a win32 error code into a human readable message"""
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    finally:
        value = None
        tb = None

else:
    exec(
        """
def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""".strip()
    )

if PYTHON3:
    UNICODE = str
else:
    UNICODE = unicode  # noqa: F821 We handled versioning above
    def get_local_encoding():
        if sys.platform == "win32":
            # Watchman always returns UTF-8 encoded strings on Windows.
            return "utf-8"
        # On the Python 3 versions we support, sys.getfilesystemencoding never
        # returns None.
        return sys.getfilesystemencoding()

else:
    # Python 2 doesn't support surrogateescape, so use 'strict' by
    # default. Users can register a custom surrogateescape error handler and use
    # that if they so desire.
    default_local_errors = "strict"

    def get_local_encoding():
        if sys.platform == "win32":
# The number of entries in the index at which point we switch to a large fanout.
# It is chosen to balance the linear scan through a sparse fanout, with the
# size of the bisect in actual index.
# 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
# bisect) with (8 step fanout scan + 1 step bisect)
# 5 step bisect = log(2^16 / 8 / 255)  # fanout
# 10 step fanout scan = 2^16 / (2^16 / 8)  # fanout space divided by entries
-SMALLFANOUTCUTOFF = 2 ** 16 // 8
+SMALLFANOUTCUTOFF = 2**16 // 8

# The amount of time to wait between checking for new packs. This prevents an
# exception when data is moved to a new pack after the process has already
# loaded the pack list.
REFRESHRATE = 0.1

if pycompat.isposix and not pycompat.ispy3:
    # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.

            raise RuntimeError(b'inconsistent version: %d' % version)
        else:
            raise RuntimeError(b'unsupported version: %d' % version)

class basepack(versionmixin):
    # The maximum amount we should read via mmap before remmaping so the old
    # pages can be released (100MB)
-    MAXPAGEDIN = 100 * 1024 ** 2
+    MAXPAGEDIN = 100 * 1024**2

    SUPPORTED_VERSIONS = [2]

    def __init__(self, path):
        self.path = path
        self.packpath = path + self.PACKSUFFIX
        self.indexpath = path + self.INDEXSUFFIX

    # v1 has metadata support
    SUPPORTED_VERSIONS = [2]

    def _compress(self, data):
        return zlib.compress(data)

    def add(self, name, node, deltabasenode, delta, metadata=None):
        # metadata is a dict, ex. {METAKEYFLAG: flag}
-        if len(name) > 2 ** 16:
+        if len(name) > 2**16:
            raise RuntimeError(_(b"name too long %s") % name)
        if len(node) != 20:
            raise RuntimeError(_(b"node should be 20 bytes %s") % node)

        if node in self.entries:
            # The revision has already been added
            return

    text_type = unicode

    def b(s):
        return s

    def u(s):
        return unicode(s, "unicode_escape")

else:
    PY3 = True
    text_type = str

    def b(s):
        return s.encode("latin-1")

    def u(s):
    def _writetempbundle(self, readfn, suffix, header=b''):
        """Write a temporary file to disk"""
        fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, 'wb') as fptemp:
            fptemp.write(header)
            while True:
-                chunk = readfn(2 ** 18)
+                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode=b"rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
from typing import (
    List,
    Tuple,
)

version: int

-def bdiff(a: bytes, b: bytes): bytes
+def bdiff(a: bytes, b: bytes):
+    bytes
def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
def fixws(s: bytes, allws: bool) -> bytes: ...
def splitnewlines(text: bytes) -> List[bytes]: ...
def xdiffblocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
    def __iter__(self) -> Iterator[bytes]: ...
    def addpath(self, path: bytes) -> None: ...
    def delpath(self, path: bytes) -> None: ...

# From manifest.c
class lazymanifest:
    def __init__(self, nodelen: int, data: bytes): ...
    def __iter__(self) -> Iterator[bytes]: ...
    def __len__(self) -> int: ...
    def __getitem__(self, item: bytes) -> Optional[Tuple[bytes, bytes]]: ...
    def __setitem__(self, key: bytes, value: Tuple[bytes, bytes]) -> None: ...
    def __delitem__(self, key: bytes) -> None: ...
    def iterkeys(self) -> Iterator[bytes]: ...
    def iterentries(self) -> Iterator[Tuple[bytes, bytes, bytes]]: ...
    def copy(self) -> lazymanifest: ...
    def filtercopy(self, matchfn: Callable[[bytes], bool]) -> lazymanifest: ...
-    def diff(self, other: lazymanifest, clean: Optional[bool]) -> Dict[bytes, Tuple[bytes, Tuple]]: ...
+    def diff(
+        self, other: lazymanifest, clean: Optional[bool]
+    ) -> Dict[bytes, Tuple[bytes, Tuple]]: ...
    def text(self) -> bytes: ...

# From revlog.c
class index:
    __doc__: str
    nodemap: Dict[bytes, int]

    def ancestors(self, *args: int) -> Iterator[int]: ...
    def commonancestorsheads(self, *args: int) -> List[int]: ...
    def clearcaches(self) -> None: ...
    def get(self, value: bytes) -> Optional[int]: ...
    def get_rev(self, value: bytes) -> Optional[int]: ...
    def has_node(self, value: Union[int, bytes]) -> bool: ...
    def rev(self, node: bytes) -> int: ...
-    def computephasesmapsets(self, root: Dict[int, Set[bytes]]) -> Tuple[int, Dict[int, Set[bytes]]]: ...
-    def reachableroots2(self, minroot: int, heads: List[int], roots: List[int], includepath: bool) -> List[int]: ...
+    def computephasesmapsets(
+        self, root: Dict[int, Set[bytes]]
+    ) -> Tuple[int, Dict[int, Set[bytes]]]: ...
+    def reachableroots2(
+        self,
+        minroot: int,
+        heads: List[int],
+        roots: List[int],
+        includepath: bool,
+    ) -> List[int]: ...
    def headrevs(self, filteredrevs: Optional[List[int]]) -> List[int]: ...
-    def headrevsfiltered(self, filteredrevs: Optional[List[int]]) -> List[int]: ...
+    def headrevsfiltered(
+        self, filteredrevs: Optional[List[int]]
+    ) -> List[int]: ...
    def issnapshot(self, value: int) -> bool: ...
-    def findsnapshots(self, cache: Dict[int, List[int]], start_rev: int) -> None: ...
-    def deltachain(self, rev: int, stop: int, generaldelta: bool) -> Tuple[List[int], bool]: ...
-    def slicechunktodensity(self, revs: List[int], targetdensity: float, mingapsize: int) -> List[List[int]]: ...
-    def append(self, value: Tuple[int, int, int, int, int, int, int, bytes]) -> None: ...
+    def findsnapshots(
+        self, cache: Dict[int, List[int]], start_rev: int
+    ) -> None: ...
+    def deltachain(
+        self, rev: int, stop: int, generaldelta: bool
+    ) -> Tuple[List[int], bool]: ...
+    def slicechunktodensity(
+        self, revs: List[int], targetdensity: float, mingapsize: int
+    ) -> List[List[int]]: ...
+    def append(
+        self, value: Tuple[int, int, int, int, int, int, int, bytes]
+    ) -> None: ...
    def partialmatch(self, node: bytes) -> bytes: ...
    def shortest(self, value: bytes) -> int: ...
    def stats(self) -> Dict[bytes, int]: ...

class nodetree:
    __doc__: str

    def insert(self, rev: int) -> None: ...
    def shortest(self, node: bytes) -> int: ...
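The stub hunk above shows a third recurring 22.1.0 change: signatures that overflow the line limit are now split across lines rather than left long. A made-up .pyi-style illustration (the function and types are hypothetical, not mercurial's):

    from typing import List, Tuple

    # Before: one line, well past 80 columns:
    # def deltachain(rev: int, stop: int, generaldelta: bool) -> Tuple[List[int], bool]: ...
    # black 22.1.0 output:
    def deltachain(
        rev: int, stop: int, generaldelta: bool
    ) -> Tuple[List[int], bool]: ...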
                parts += 1
            elif noentries:
                parts += 1
                break
            noentries = False
            yield chunkheader(len(chunk))
            pos = 0
            while pos < len(chunk):
-                next = pos + 2 ** 20
+                next = pos + 2**20
                yield chunk[pos:next]
                pos = next
        yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
    ):
        self.copy = {} if copy is None else copy
        self.renamedelete = {} if renamedelete is None else renamedelete
        self.dirmove = {} if dirmove is None else dirmove
        self.movewithdir = {} if movewithdir is None else movewithdir

    def __repr__(self):
-        return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
-            self.copy,
-            self.renamedelete,
-            self.dirmove,
-            self.movewithdir,
-        )
+        return (
+            '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>'
+            % (
+                self.copy,
+                self.renamedelete,
+                self.dirmove,
+                self.movewithdir,
+            )
+        )


def _fullcopytracing(repo, c1, c2, base):
    """The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
            pass

        # Otherwise mark it as closed to silence "Exception ignored in"
        # message emitted by the interpreter finalizer.
        try:
            fp.close()
        except IOError:
            pass

else:

    def initstdio():
        for fp in (sys.stdin, sys.stdout, sys.stderr):
            procutil.setbinary(fp)

    def _silencestdio():
        pass
        finally:
            fp.detach()

    def parsebytes(data):
        # type: (bytes) -> email.message.Message
        ep = email.parser.BytesParser()
        return ep.parsebytes(data)

else:
    Generator = email.generator.Generator

    def parse(fp):
        # type: (Any) -> email.message.Message
        ep = email.parser.Parser()
        return ep.parse(fp)
class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
-            self.maxsize = 4 * (2 ** 20)
+            self.maxsize = 4 * (2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
    def posixfile(name, mode='r', buffering=-1):
        fp = open(name, mode=mode, buffering=buffering)

        # The position when opening in append mode is implementation defined, so
        # make it consistent by always seeking to the end.
        if 'a' in mode:
            fp.seek(0, os.SEEK_END)

        return fp

else:
    # The underlying file object seeks as required in Python 3:
    # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474
    posixfile = open


def split(p):
    """Same as posixpath.split, but faster
        ):
            return []
        rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
        rfdscount = (
            cmsg.cmsg_len - _cmsghdr.cmsg_data.offset
        ) // ctypes.sizeof(ctypes.c_int)
        return [rfds[i] for i in pycompat.xrange(rfdscount)]

else:
    import msvcrt

    _kernel32 = ctypes.windll.kernel32  # pytype: disable=module-attr

    _DWORD = ctypes.c_ulong
    _LPCSTR = _LPSTR = ctypes.c_char_p
    _HANDLE = ctypes.c_void_p
if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache

else:
    parse_index_v1_nodemap = None


def parse_index_v1_mixed(data, inline):
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache

    util,
    vfs as vfsmod,
)
from .utils import hashutil

parsers = policy.importmod('parsers')
# how much bytes should be read from fncache in one read
# It is done to prevent loading large fncache files into memory
-fncache_chunksize = 10 ** 6
+fncache_chunksize = 10**6


def _matchtrackedpath(path, matcher):
    """parses a fncache entry and returns whether the entry is tracking a path
    matched by matcher or not.

    If matcher is None, returns True"""

                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path').encode('utf8')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
-            if (
-                item
-                not in (
-                    '',
-                    'normal',
-                    'unversioned',
-                    'external',
-                )
-                or props not in ('', 'none', 'normal')
-            ):
+            if item not in (
+                '',
+                'normal',
+                'unversioned',
+                'external',
+            ) or props not in ('', 'none', 'normal'):
                changes.append(path)

        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    return True, True, bool(missing)

        return bool(changes), False, bool(missing)

    @annotatesubrepoerror
        return req.selector

    def getdata(req):
        return req.data

    def hasdata(req):
        return req.data is not None

else:
    # pytype: disable=import-error
    import BaseHTTPServer
    import CGIHTTPServer
    import SimpleHTTPServer
    import urllib2
    import urllib
    import urlparse
"""Allow arbitrary sized chunks of data to be efficiently read from an | """Allow arbitrary sized chunks of data to be efficiently read from an | ||||
iterator over chunks of arbitrary size.""" | iterator over chunks of arbitrary size.""" | ||||
def __init__(self, in_iter): | def __init__(self, in_iter): | ||||
"""in_iter is the iterator that's iterating over the input chunks.""" | """in_iter is the iterator that's iterating over the input chunks.""" | ||||
def splitbig(chunks): | def splitbig(chunks): | ||||
for chunk in chunks: | for chunk in chunks: | ||||
if len(chunk) > 2 ** 20: | if len(chunk) > 2**20: | ||||
pos = 0 | pos = 0 | ||||
while pos < len(chunk): | while pos < len(chunk): | ||||
end = pos + 2 ** 18 | end = pos + 2**18 | ||||
yield chunk[pos:end] | yield chunk[pos:end] | ||||
pos = end | pos = end | ||||
else: | else: | ||||
yield chunk | yield chunk | ||||
self.iter = splitbig(in_iter) | self.iter = splitbig(in_iter) | ||||
self._queue = collections.deque() | self._queue = collections.deque() | ||||
self._chunkoffset = 0 | self._chunkoffset = 0 | ||||
def read(self, l=None): | def read(self, l=None): | ||||
"""Read L bytes of data from the iterator of chunks of data. | """Read L bytes of data from the iterator of chunks of data. | ||||
Returns less than L bytes if the iterator runs dry. | Returns less than L bytes if the iterator runs dry. | ||||
If size parameter is omitted, read everything""" | If size parameter is omitted, read everything""" | ||||
if l is None: | if l is None: | ||||
return b''.join(self.iter) | return b''.join(self.iter) | ||||
left = l | left = l | ||||
buf = [] | buf = [] | ||||
queue = self._queue | queue = self._queue | ||||
while left > 0: | while left > 0: | ||||
# refill the queue | # refill the queue | ||||
if not queue: | if not queue: | ||||
target = 2 ** 18 | target = 2**18 | ||||
for chunk in self.iter: | for chunk in self.iter: | ||||
queue.append(chunk) | queue.append(chunk) | ||||
target -= len(chunk) | target -= len(chunk) | ||||
if target <= 0: | if target <= 0: | ||||
break | break | ||||
if not queue: | if not queue: | ||||
break | break | ||||
if type(fp) is file: | if type(fp) is file: | ||||
fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode) | fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode) | ||||
if fastpath: | if fastpath: | ||||
return fp | return fp | ||||
else: | else: | ||||
# fp.readline deals with EINTR correctly, use it as a workaround. | # fp.readline deals with EINTR correctly, use it as a workaround. | ||||
return iter(fp.readline, b'') | return iter(fp.readline, b'') | ||||
else: | else: | ||||
# PyPy and CPython 3 do not have the EINTR issue thus no workaround needed. | # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed. | ||||
def iterfile(fp): | def iterfile(fp): | ||||
return fp | return fp | ||||
def iterlines(iterator): | def iterlines(iterator): | ||||
# type: (Iterator[bytes]) -> Iterator[bytes] | # type: (Iterator[bytes]) -> Iterator[bytes] | ||||
) | ) | ||||
) | ) | ||||
return result | return result | ||||
return wrapper | return wrapper | ||||
_sizeunits = ( | _sizeunits = ( | ||||
(b'm', 2 ** 20), | (b'm', 2**20), | ||||
(b'k', 2 ** 10), | (b'k', 2**10), | ||||
(b'g', 2 ** 30), | (b'g', 2**30), | ||||
(b'kb', 2 ** 10), | (b'kb', 2**10), | ||||
(b'mb', 2 ** 20), | (b'mb', 2**20), | ||||
(b'gb', 2 ** 30), | (b'gb', 2**30), | ||||
(b'b', 1), | (b'b', 1), | ||||
) | ) | ||||
def sizetoint(s): | def sizetoint(s): | ||||
# type: (bytes) -> int | # type: (bytes) -> int | ||||
"""Convert a space specifier to a byte count. | """Convert a space specifier to a byte count. | ||||
"""Represents an error decoding CBOR.""" | """Represents an error decoding CBOR.""" | ||||
if sys.version_info.major >= 3: | if sys.version_info.major >= 3: | ||||
def _elementtointeger(b, i): | def _elementtointeger(b, i): | ||||
return b[i] | return b[i] | ||||
else: | else: | ||||
def _elementtointeger(b, i): | def _elementtointeger(b, i): | ||||
return ord(b[i]) | return ord(b[i]) | ||||
STRUCT_BIG_UBYTE = struct.Struct('>B') | STRUCT_BIG_UBYTE = struct.Struct('>B') | ||||
STRUCT_BIG_USHORT = struct.Struct(b'>H') | STRUCT_BIG_USHORT = struct.Struct(b'>H') |
        else:
            if self._level is None:
                z = zlib.compressobj()
            else:
                z = zlib.compressobj(level=self._level)
            parts = []
            pos = 0
            while pos < insize:
-                pos2 = pos + 2 ** 20
+                pos2 = pos + 2**20
                parts.append(z.compress(data[pos:pos2]))
                pos = pos2
            parts.append(z.flush())

            if sum(map(len, parts)) < insize:
                return b''.join(parts)
        return None

            stderr=stderr,
        )
        if record_wait is not None:
            record_wait(p.wait)
    finally:
        if stdin is not None:
            stdin.close()

else:

    def runbgcommandpy3(
        cmd,
        env,
        shell=False,
        stdout=None,
        stderr=None,
# leading "mercurial." off of the package name, so that these | # leading "mercurial." off of the package name, so that these | ||||
# pseudo resources are found in their directory next to the | # pseudo resources are found in their directory next to the | ||||
# executable. | # executable. | ||||
def _package_path(package): | def _package_path(package): | ||||
dirs = package.split(b".") | dirs = package.split(b".") | ||||
assert dirs[0] == b"mercurial" | assert dirs[0] == b"mercurial" | ||||
return os.path.join(_rootpath, *dirs[1:]) | return os.path.join(_rootpath, *dirs[1:]) | ||||
else: | else: | ||||
datapath = os.path.dirname(os.path.dirname(pycompat.fsencode(__file__))) | datapath = os.path.dirname(os.path.dirname(pycompat.fsencode(__file__))) | ||||
_rootpath = os.path.dirname(datapath) | _rootpath = os.path.dirname(datapath) | ||||
def _package_path(package): | def _package_path(package): | ||||
return os.path.join(_rootpath, *package.split(b".")) | return os.path.join(_rootpath, *package.split(b".")) | ||||
return False | return False | ||||
def contents(package): | def contents(package): | ||||
path = pycompat.fsdecode(_package_path(package)) | path = pycompat.fsdecode(_package_path(package)) | ||||
for p in os.listdir(path): | for p in os.listdir(path): | ||||
yield pycompat.fsencode(p) | yield pycompat.fsencode(p) | ||||
else: | else: | ||||
from .. import encoding | from .. import encoding | ||||
def open_resource(package, name): | def open_resource(package, name): | ||||
return resources.open_binary( # pytype: disable=module-attr | return resources.open_binary( # pytype: disable=module-attr | ||||
pycompat.sysstr(package), pycompat.sysstr(name) | pycompat.sysstr(package), pycompat.sysstr(name) | ||||
) | ) | ||||
_kernel32.PeekNamedPipe.restype = _BOOL | _kernel32.PeekNamedPipe.restype = _BOOL | ||||
def _raiseoserror(name): | def _raiseoserror(name): | ||||
# Force the code to a signed int to avoid an 'int too large' error. | # Force the code to a signed int to avoid an 'int too large' error. | ||||
# See https://bugs.python.org/issue28474 | # See https://bugs.python.org/issue28474 | ||||
code = _kernel32.GetLastError() | code = _kernel32.GetLastError() | ||||
if code > 0x7FFFFFFF: | if code > 0x7FFFFFFF: | ||||
code -= 2 ** 32 | code -= 2**32 | ||||
err = ctypes.WinError(code=code) # pytype: disable=module-attr | err = ctypes.WinError(code=code) # pytype: disable=module-attr | ||||
raise OSError( | raise OSError( | ||||
err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | ||||
) | ) | ||||
def _getfileinfo(name): | def _getfileinfo(name): | ||||
fh = _kernel32.CreateFileA( | fh = _kernel32.CreateFileA( |
if not ret: | if not ret: | ||||
break | break | ||||
pos += ret | pos += ret | ||||
del view | del view | ||||
del buf[pos:] | del buf[pos:] | ||||
return bytes(buf) | return bytes(buf) | ||||
else: | else: | ||||
def ismainthread(): | def ismainthread(): | ||||
# pytype: disable=module-attr | # pytype: disable=module-attr | ||||
return isinstance(threading.current_thread(), threading._MainThread) | return isinstance(threading.current_thread(), threading._MainThread) | ||||
# pytype: enable=module-attr | # pytype: enable=module-attr | ||||
def _blockingreader(wrapped): | def _blockingreader(wrapped): |
httpserver = util.httpserver | httpserver = util.httpserver | ||||
OptionParser = optparse.OptionParser | OptionParser = optparse.OptionParser | ||||
if os.environ.get('HGIPV6', '0') == '1': | if os.environ.get('HGIPV6', '0') == '1': | ||||
class simplehttpserver(httpserver.httpserver): | class simplehttpserver(httpserver.httpserver): | ||||
address_family = socket.AF_INET6 | address_family = socket.AF_INET6 | ||||
else: | else: | ||||
simplehttpserver = httpserver.httpserver | simplehttpserver = httpserver.httpserver | ||||
class _httprequesthandler(httpserver.simplehttprequesthandler): | class _httprequesthandler(httpserver.simplehttprequesthandler): | ||||
def log_message(self, format, *args): | def log_message(self, format, *args): | ||||
httpserver.simplehttprequesthandler.log_message(self, format, *args) | httpserver.simplehttprequesthandler.log_message(self, format, *args) | ||||
sys.stderr.flush() | sys.stderr.flush() |
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range  # we use xrange in one place, and we'd rather not use range

    def _sys2bytes(p):
        return p.encode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print(
        '%s is only supported on Python 3.5+ and 2.7, not %s'
        % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
    )
    sys.exit(70)  # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False
            return p
        return p.encode('utf-8')

    def _bytes2sys(p):
        if p is None:
            return p
        return p.decode('utf-8')

else:

    def _sys2bytes(p):
        return p

    _bytes2sys = _sys2bytes

                logfn('# Daemon process %d is stuck')
            elif r == WAIT_FAILED:
                _check(0)  # err stored in GetLastError()
        except:  # re-raises
            ctypes.windll.kernel32.CloseHandle(handle)  # no _check, keep error
            raise
        _check(ctypes.windll.kernel32.CloseHandle(handle))

else:

    def kill(pid, logfn, tryhard=True):
        try:
            os.kill(pid, 0)
            logfn('# Killing daemon process %d' % pid)
            os.kill(pid, signal.SIGTERM)
            if tryhard:
            1048575,
            1048576,
            1048577,
        ]

        for size in lens:
            if size < 24:
                hlen = 1
-            elif size < 2 ** 8:
+            elif size < 2**8:
                hlen = 2
-            elif size < 2 ** 16:
+            elif size < 2**16:
                hlen = 3
-            elif size < 2 ** 32:
+            elif size < 2**32:
                hlen = 5
            else:
                assert False

            source = b'x' * size
            encoded = b''.join(cborutil.streamencode(source))

            res = cborutil.decodeitem(encoded[0:1])

            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:2]),
            (True, -42, 2, cborutil.SPECIAL_NONE),
        )

    def testdecodepartialushort(self):
-        encoded = b''.join(cborutil.streamencode(2 ** 15))
+        encoded = b''.join(cborutil.streamencode(2**15))

        self.assertEqual(
            cborutil.decodeitem(encoded[0:1]),
            (False, None, -2, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:2]),
            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:5]),
-            (True, 2 ** 15, 3, cborutil.SPECIAL_NONE),
+            (True, 2**15, 3, cborutil.SPECIAL_NONE),
        )

    def testdecodepartialshort(self):
        encoded = b''.join(cborutil.streamencode(-1024))

        self.assertEqual(
            cborutil.decodeitem(encoded[0:1]),
            (False, None, -2, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:2]),
            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:3]),
            (True, -1024, 3, cborutil.SPECIAL_NONE),
        )

    def testdecodepartialulong(self):
-        encoded = b''.join(cborutil.streamencode(2 ** 28))
+        encoded = b''.join(cborutil.streamencode(2**28))

        self.assertEqual(
            cborutil.decodeitem(encoded[0:1]),
            (False, None, -4, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:2]),
            (False, None, -3, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:3]),
            (False, None, -2, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:4]),
            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:5]),
-            (True, 2 ** 28, 5, cborutil.SPECIAL_NONE),
+            (True, 2**28, 5, cborutil.SPECIAL_NONE),
        )

    def testdecodepartiallong(self):
        encoded = b''.join(cborutil.streamencode(-1048580))

        self.assertEqual(
            cborutil.decodeitem(encoded[0:1]),
            (False, None, -4, cborutil.SPECIAL_NONE),

            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:5]),
            (True, -1048580, 5, cborutil.SPECIAL_NONE),
        )

    def testdecodepartialulonglong(self):
-        encoded = b''.join(cborutil.streamencode(2 ** 32))
+        encoded = b''.join(cborutil.streamencode(2**32))

        self.assertEqual(
            cborutil.decodeitem(encoded[0:1]),
            (False, None, -8, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:2]),
            (False, None, -7, cborutil.SPECIAL_NONE),

            (False, None, -2, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:8]),
            (False, None, -1, cborutil.SPECIAL_NONE),
        )
        self.assertEqual(
            cborutil.decodeitem(encoded[0:9]),
-            (True, 2 ** 32, 9, cborutil.SPECIAL_NONE),
+            (True, 2**32, 9, cborutil.SPECIAL_NONE),
        )

        with self.assertRaisesRegex(
            cborutil.CBORDecodeError, 'input data not fully consumed'
        ):
            cborutil.decodeall(encoded[0:1])

        with self.assertRaisesRegex(
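Reading the assertions above, decodeitem evidently returns a (complete, value, readcount, special) tuple, where a negative readcount means that many more bytes are still needed. A hypothetical incremental consumer built on that reading (a sketch, not mercurial's actual decoder):

    def decode_stream(decodeitem, data):
        """Yield decoded values by feeding decodeitem growing slices.

        Sketch of the contract the tests above exercise: a negative
        third element is the number of bytes still missing.
        """
        offset, want = 0, 1
        while offset < len(data):
            complete, value, readcount, _special = decodeitem(
                data[offset : offset + want]
            )
            if complete:
                yield value
                offset += readcount
                want = 1
            else:
                want += -readcount  # grow the slice by the byte deficit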
    def testPackMetadata(self):
        revisions = []
        for i in range(100):
            filename = b'%d.txt' % i
            content = b'put-something-here \n' * i
            node = self.getHash(content)
            meta = {
-                constants.METAKEYFLAG: i ** 4,
+                constants.METAKEYFLAG: i**4,
                constants.METAKEYSIZE: len(content),
                b'Z': b'random_string',
                b'_': b'\0' * i,
            }
            revisions.append(
                (filename, node, sha1nodeconstants.nullid, content, meta)
            )
        pack = self.createPack(revisions)
def genbits(n):
    """Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
    i.e. the generated numbers have a width of n bits.

    The combination of two adjacent numbers will cover all possible cases.
    That is to say, given any x, y where both x, and y are in range(2 ** n),
    there is an x followed immediately by y in the generated sequence.
    """
-    m = 2 ** n
+    m = 2**n

    # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
    gray = lambda x: x ^ (x >> 1)
    reversegray = {gray(i): i for i in range(m)}

    # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
    # the next unused gray code where higher n bits equal to X.
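The genbits docstring above promises that every ordered pair of n-bit numbers shows up as adjacent elements of the output. A tiny property check one could run against it (a sketch; only genbits itself is assumed from the hunk above):

    def covers_all_pairs(seq, n):
        """Return True if every (x, y) with x, y in range(2**n) occurs
        as an adjacent pair somewhere in seq."""
        pairs = {(seq[i], seq[i + 1]) for i in range(len(seq) - 1)}
        return len(pairs) == (2**n) ** 2

    # covers_all_pairs(list(genbits(3)), 3) should hold: 2**(3*2) + 1 = 65
    # numbers give 64 adjacent pairs, exactly one per (x, y) combination.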
if pycompat.ispy3:

    def set_noninheritable(fd):
        # On Python 3, file descriptors are non-inheritable by default.
        pass

else:
    if pycompat.iswindows:
        # unused
        set_noninheritable = None
    else:
        import fcntl

        def set_noninheritable(fd):
        ),
    )
    settings.register_profile(
        'continuous',
        settings(
            timeout=-1,
            stateful_step_count=1000,
-            max_examples=10 ** 8,
-            max_iterations=10 ** 8,
+            max_examples=10**8,
+            max_iterations=10**8,
            database=writeonlydatabase(settings.default.database),
        ),
    )
    settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))

    verifyingtest = verifyingstatemachine.TestCase