Diff Detail

- Repository: rHG Mercurial
- Branch: default
- Lint: No Linters Available
- Unit: No Unit Test Coverage
Status | Path | Lines changed
---|---|---
M | mercurial/chgserver.py | 4
M | mercurial/exchange.py | 8
M | mercurial/hg.py | 5
M | mercurial/localrepo.py | 4
M | mercurial/merge.py | 4
M | mercurial/obsolete.py | 8
M | mercurial/patch.py | 4
M | mercurial/repair.py | 8
M | mercurial/revlogutils/sidedata.py | 6
M | mercurial/scmutil.py | 4
M | mercurial/sparse.py | 6
M | mercurial/store.py | 4
M | mercurial/subrepo.py | 6
M | mercurial/util.py | 3
M | mercurial/utils/storageutil.py | 6
M | mercurial/wireprotov1peer.py | 4
M | mercurial/wireprotov2server.py | 4
Commit | Parents | Author | Summary | Date
---|---|---|---|---
4f1fa9d6cce3 | 3c7f49ec86d8 | Augie Fackler | | Jan 13 2020, 5:15 PM
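Every hunk below follows the same recipe: drop the module-level `import hashlib`, import `hashutil` from `mercurial/utils`, and call `hashutil.sha1()` wherever `hashlib.sha1()` was called before. The value of the indirection is that the SHA-1 implementation is chosen in one place. A minimal sketch of what `mercurial/utils/hashutil.py` plausibly looks like, assuming it prefers the vendored collision-detecting sha1dc module and falls back to the stdlib (the module body itself is not part of this diff):

```python
# Hypothetical sketch of mercurial/utils/hashutil.py; not shown in this
# diff. Assumption: prefer sha1dc (collision-detecting SHA-1) when the
# vendored module is importable, otherwise use the stdlib constructor.
from __future__ import absolute_import

import hashlib

try:
    from ..thirdparty import sha1dc

    sha1 = sha1dc.sha1
except (ImportError, AttributeError):
    sha1 = hashlib.sha1
```

Because callers only use the `sha1(data)` constructor plus `.update()`, `.digest()`, and `.copy()`, the swap is behavior-preserving for ordinary input.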
mercurial/chgserver.py

   idletimeout = 3600
   # whether to skip config or env change checks
   skiphash = False
 """
 from __future__ import absolute_import
-import hashlib
 import inspect
 import os
 import re
 import socket
 import stat
 import struct
 import time
     error,
     extensions,
     node,
     pycompat,
     util,
 )
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
 def _hashlist(items):
     """return sha1 hexdigest for a list"""
-    return node.hex(hashlib.sha1(stringutil.pprint(items)).digest())
+    return node.hex(hashutil.sha1(stringutil.pprint(items)).digest())
 # sensitive config sections affecting confighash
 _configsections = [
     b'alias',  # affects global state commands.table
     b'eol',  # uses setconfig('eol', ...)
     b'extdiff',  # uisetup will register new commands
     b'extensions',
mercurial/exchange.py

 # exchange.py - utility to exchange data between repos.
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import collections
-import hashlib
 from .i18n import _
 from .node import (
     hex,
     nullid,
     nullrev,
 )
 from .thirdparty import attr
     scmutil,
     sslutil,
     streamclone,
     url as urlmod,
     util,
     wireprototypes,
 )
 from .interfaces import repository
-from .utils import stringutil
+from .utils import (
+    hashutil,
+    stringutil,
+)
 urlerr = util.urlerr
 urlreq = util.urlreq
 _NARROWACL_SECTION = b'narrowacl'
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {
 def check_heads(repo, their_heads, context):
     """check if the heads of a repo have been modified
     Used by peer for unbundling.
     """
     heads = repo.heads()
-    heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
+    heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
     if not (
         their_heads == [b'force']
         or their_heads == heads
         or their_heads == [b'hashed', heads_hash]
     ):
         # someone else committed/pushed/unbundled while we
         # were transferring data
         raise error.PushRaced(
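`check_heads()` is the server half of the unbundle race check; the client half appears in `mercurial/wireprotov1peer.py` further down. Both sides sort the head nodes before joining and hashing, so the digest is independent of head order. A standalone sketch of the contract, with invented head values:

```python
# Both ends of the unbundle call hash the sorted head list; a digest
# mismatch means history moved while the bundle was in flight.
# Head node values below are invented 20-byte ids.
from mercurial.utils import hashutil

def hashedheads(heads):
    # sorting first makes the digest independent of head order
    return hashutil.sha1(b''.join(sorted(heads))).digest()

client_view = [b'\x11' * 20, b'\x22' * 20]  # heads when the client started
server_now = [b'\x22' * 20, b'\x11' * 20]   # same heads, another order

assert hashedheads(client_view) == hashedheads(server_now)  # no race

server_now.append(b'\x33' * 20)  # someone pushed in the meantime
assert hashedheads(client_view) != hashedheads(server_now)  # PushRaced
```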
mercurial/hg.py

 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
-import hashlib
 import os
 import shutil
 import stat
 from .i18n import _
 from .node import nullid
 from .pycompat import getattr
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )
+from .utils import hashutil
 from .interfaces import repository as repositorymod
 release = lock.release
 # shared features
 sharedbookmarks = b'bookmarks'
         ui.status(
             _(
                 b'(not using pooled storage: '
                 b'unable to resolve identity of remote)\n'
             )
         )
     elif sharenamemode == b'remote':
         sharepath = os.path.join(
-            sharepool, node.hex(hashlib.sha1(source).digest())
+            sharepool, node.hex(hashutil.sha1(source).digest())
         )
     else:
         raise error.Abort(
             _(b'unknown share naming mode: %s') % sharenamemode
         )
     # TODO this is a somewhat arbitrary restriction.
     if narrow:
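For `sharenamemode == b'remote'`, the pool directory name is simply the hex SHA-1 of the source URL, so repeated clones of the same remote resolve to the same shared store. An illustration with invented paths:

```python
# Pooled-share naming: one directory per source URL, keyed by its SHA-1.
# The pool path and URL are invented for illustration.
import os

from mercurial import node
from mercurial.utils import hashutil

sharepool = b'/home/user/.hg-pool'
source = b'https://example.com/some/repo'
sharepath = os.path.join(sharepool, node.hex(hashutil.sha1(source).digest()))
# every clone of the same URL maps to the same 40-hex-char pool entry
```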
mercurial/localrepo.py

 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
-import hashlib
 import os
 import random
 import sys
 import time
 import weakref
 from .i18n import _
 from .node import (
 )
 from .interfaces import (
     repository,
     util as interfaceutil,
 )
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
 from .revlogutils import constants as revlogconst
 release = lockmod.release
 urlerr = util.urlerr
         # abort here if the journal already exists
         if self.svfs.exists(b"journal"):
             raise error.RepoError(
                 _(b"abandoned transaction found"),
                 hint=_(b"run 'hg recover' to clean up transaction"),
             )
         idbase = b"%.40f#%f" % (random.random(), time.time())
-        ha = hex(hashlib.sha1(idbase).digest())
+        ha = hex(hashutil.sha1(idbase).digest())
         txnid = b'TXN:' + ha
         self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
         self._writejournal(desc)
         renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
         if report:
             rp = report
         else:
mercurial/merge.py

 # merge.py - directory-level update/merge handling for Mercurial
 #
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
-import hashlib
 import shutil
 import stat
 import struct
 from .i18n import _
 from .node import (
     addednodeid,
     bin,
     obsutil,
     pathutil,
     pycompat,
     scmutil,
     subrepoutil,
     util,
     worker,
 )
+from .utils import hashutil
 _pack = struct.pack
 _unpack = struct.unpack
 def _droponode(data):
     # used for compatibility for v1
     bits = data.split(b'\0')
         f.write(_pack(format, key, len(data), data))
         f.close()
     @staticmethod
     def getlocalkey(path):
         """hash the path of a local file context for storage in the .hg/merge
         directory."""
-        return hex(hashlib.sha1(path).digest())
+        return hex(hashutil.sha1(path).digest())
     def add(self, fcl, fco, fca, fd):
         """add a new (potentially?) conflicting file the merge state
         fcl: file context for local,
         fco: file context for remote,
         fca: file context for ancestors,
         fd: file path of the resulting merge.
mercurial/obsolete.py

 The header is followed by the markers. Marker format depend of the version. See
 comment associated with each format for details.
 """
 from __future__ import absolute_import
 import errno
-import hashlib
 import struct
 from .i18n import _
 from .pycompat import getattr
 from . import (
     encoding,
     error,
     node,
     obsutil,
     phases,
     policy,
     pycompat,
     util,
 )
-from .utils import dateutil
+from .utils import (
+    dateutil,
+    hashutil,
+)
 parsers = policy.importmod('parsers')
 _pack = struct.pack
 _unpack = struct.unpack
 _calcsize = struct.calcsize
 propertycache = util.propertycache
             divergent.add(rev)
             break
         toprocess.update(obsstore.predecessors.get(prec, ()))
     return divergent
 def makefoldid(relation, user):
-    folddigest = hashlib.sha1(user)
+    folddigest = hashutil.sha1(user)
     for p in relation[0] + relation[1]:
         folddigest.update(b'%d' % p.rev())
         folddigest.update(p.node())
     # Since fold only has to compete against fold for the same successors, it
     # seems fine to use a small ID. Smaller ID save space.
     return node.hex(folddigest.digest())[:8]
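`makefoldid()` folds the user plus each predecessor's rev number and node into one digest and keeps only the first 8 hex characters; as the comment notes, fold IDs only need to be unique among folds with the same successors. A sketch of the recipe with stand-in contexts (the `fakectx` class is invented for illustration and is not a real Mercurial context):

```python
# makefoldid() digest recipe with invented revisions.
from mercurial import node
from mercurial.utils import hashutil

class fakectx(object):
    def __init__(self, rev, nodeid):
        self._rev, self._node = rev, nodeid

    def rev(self):
        return self._rev

    def node(self):
        return self._node

relation = ([fakectx(5, b'\xaa' * 20)], [fakectx(7, b'\xbb' * 20)])
folddigest = hashutil.sha1(b'alice <alice@example.com>')
for p in relation[0] + relation[1]:
    folddigest.update(b'%d' % p.rev())
    folddigest.update(p.node())
foldid = node.hex(folddigest.digest())[:8]  # 8 hex chars suffice here
```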
mercurial/patch.py

 # patch.py - patch file parsing routines
 #
 # Copyright 2006 Brendan Cully <brendan@kublai.com>
 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import, print_function
 import collections
 import contextlib
 import copy
 import errno
-import hashlib
 import os
 import re
 import shutil
 import zlib
 from .i18n import _
 from .node import (
     hex,
     pycompat,
     scmutil,
     similar,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     dateutil,
+    hashutil,
     procutil,
     stringutil,
 )
 stringio = util.stringio
 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
 tabsplitter = re.compile(br'(\t+|[^\t]+)')
     pathfn is applied to every path in the diff output.
     '''
     def gitindex(text):
         if not text:
             text = b""
         l = len(text)
-        s = hashlib.sha1(b'blob %d\0' % l)
+        s = hashutil.sha1(b'blob %d\0' % l)
         s.update(text)
         return hex(s.digest())
     if opts.noprefix:
         aprefix = bprefix = b''
     else:
         aprefix = b'a/'
         bprefix = b'b/'
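`gitindex()` reproduces git's blob object hashing, `sha1(b"blob %d\0" % len(data) + data)`, which is why the `index` lines in `hg diff --git` output agree with git's own blob IDs. A quick self-contained check, using `hashlib.sha1` directly (which `hashutil.sha1` matches for ordinary input):

```python
# gitindex() mirrors git's blob hashing: sha1(b"blob <len>\0" + data).
import hashlib

def gitblobid(data):
    h = hashlib.sha1(b'blob %d\0' % len(data))
    h.update(data)
    return h.hexdigest()

# `printf 'hello\n' | git hash-object --stdin` prints the same id:
assert gitblobid(b'hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a'
```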
mercurial/repair.py

 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
-import hashlib
 from .i18n import _
 from .node import (
     hex,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     discovery,
     error,
     exchange,
     obsolete,
     obsutil,
     pathutil,
     phases,
     pycompat,
     util,
 )
-from .utils import stringutil
+from .utils import (
+    hashutil,
+    stringutil,
+)
 def backupbundle(
     repo, bases, heads, node, suffix, compress=True, obsolescence=True
 ):
     """create a bundle with the specified revisions as a backup"""
     backupdir = b"strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
-    totalhash = hashlib.sha1(b''.join(allhashes)).digest()
+    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
     name = b"%s/%s-%s-%s.hg" % (
         backupdir,
         short(node),
         hex(totalhash[:4]),
         suffix,
     )
     cgversion = changegroup.localversion(repo)
mercurial/revlogutils/sidedata.py

 <all bytes remaining in the rawtext>
 This is a simple and effective format. It should be enought to experiment with
 the concept.
 """
 from __future__ import absolute_import
-import hashlib
 import struct
 from .. import error
+from ..utils import hashutil
 ## sidedata type constant
 # reserve a block for testing purposes.
 SD_TEST1 = 1
 SD_TEST2 = 2
 SD_TEST3 = 3
 SD_TEST4 = 4
 SD_TEST5 = 5
 SIDEDATA_ENTRY = struct.Struct('>HL20s')
 def sidedatawriteprocessor(rl, text, sidedata):
     sidedata = list(sidedata.items())
     sidedata.sort()
     rawtext = [SIDEDATA_HEADER.pack(len(sidedata))]
     for key, value in sidedata:
-        digest = hashlib.sha1(value).digest()
+        digest = hashutil.sha1(value).digest()
         rawtext.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
     for key, value in sidedata:
         rawtext.append(value)
     rawtext.append(bytes(text))
     return b''.join(rawtext), False
 def sidedatareadprocessor(rl, text):
     sidedata = {}
     offset = 0
     (nbentry,) = SIDEDATA_HEADER.unpack(text[: SIDEDATA_HEADER.size])
     offset += SIDEDATA_HEADER.size
     dataoffset = SIDEDATA_HEADER.size + (SIDEDATA_ENTRY.size * nbentry)
     for i in range(nbentry):
         nextoffset = offset + SIDEDATA_ENTRY.size
         key, size, storeddigest = SIDEDATA_ENTRY.unpack(text[offset:nextoffset])
         offset = nextoffset
         # read the data associated with that entry
         nextdataoffset = dataoffset + size
         entrytext = text[dataoffset:nextdataoffset]
-        readdigest = hashlib.sha1(entrytext).digest()
+        readdigest = hashutil.sha1(entrytext).digest()
         if storeddigest != readdigest:
             raise error.SidedataHashError(key, storeddigest, readdigest)
         sidedata[key] = entrytext
         dataoffset = nextdataoffset
     text = text[dataoffset:]
     return text, True, sidedata
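The write/read processors agree on a simple container: a count header, fixed-size `(key, length, digest)` entries, the concatenated entry payloads, then the revision text. A round-trip sketch; `SIDEDATA_HEADER` is outside this hunk, so its `'>H'` layout is an assumption consistent with how `nbentry` is packed and unpacked above:

```python
# Round-trip sketch of the sidedata container format.
import hashlib
import struct

SIDEDATA_HEADER = struct.Struct('>H')     # entry count (assumed layout)
SIDEDATA_ENTRY = struct.Struct('>HL20s')  # key, payload length, sha1

def encode(sidedata, text):
    entries = sorted(sidedata.items())
    out = [SIDEDATA_HEADER.pack(len(entries))]
    for key, value in entries:
        digest = hashlib.sha1(value).digest()
        out.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
    out.extend(value for _, value in entries)
    out.append(text)
    return b''.join(out)

blob = encode({1: b'payload'}, b'revision text')
(nbentry,) = SIDEDATA_HEADER.unpack(blob[:SIDEDATA_HEADER.size])
assert nbentry == 1
assert blob.endswith(b'revision text')
```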
mercurial/scmutil.py

 # scmutil.py - Mercurial core utility functions
 #
 # Copyright Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
 import glob
-import hashlib
 import os
 import posixpath
 import re
 import subprocess
 import weakref
 from .i18n import _
 from .node import (
     similar,
     smartset,
     url,
     util,
     vfs,
 )
 from .utils import (
+    hashutil,
     procutil,
     stringutil,
 )
 if pycompat.iswindows:
     from . import scmwindows as scmplatform
 else:
     from . import scmposix as scmplatform
     that SHA-1 digest.
     """
     cl = repo.changelog
     if not cl.filteredrevs:
         return None
     key = None
     revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
     if revs:
-        s = hashlib.sha1()
+        s = hashutil.sha1()
         for rev in revs:
             s.update(b'%d;' % rev)
         key = s.digest()
     return key
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
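The filtered-revision cache key above is nothing more than the ascending rev numbers, each terminated with `b';'`, fed through SHA-1. A minimal restatement with invented rev numbers:

```python
# filteredhash() key material: each filtered rev number, ascending,
# terminated with b';'. Rev numbers are invented.
import hashlib

revs = sorted([2, 5, 9])
s = hashlib.sha1()
for rev in revs:
    s.update(b'%d;' % rev)
key = s.digest()  # 20-byte key naming this exact filtered set
```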
mercurial/sparse.py

 # sparse.py - functionality for sparse checkouts
 #
 # Copyright 2014 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
-import hashlib
 import os
 from .i18n import _
 from .node import (
     hex,
     nullid,
 )
 from . import (
     error,
     match as matchmod,
     merge as mergemod,
     pathutil,
     pycompat,
     scmutil,
     util,
 )
+from .utils import hashutil
 # Whether sparse features are enabled. This variable is intended to be
 # temporary to facilitate porting sparse to core. It should eventually be
 # a per-repo option, possibly a repo requirement.
 enabled = False
 def parseconfig(ui, raw, action):
     signature = cache.get(b'signature')
     if includetemp:
         tempsignature = cache.get(b'tempsignature')
     else:
         tempsignature = b'0'
     if signature is None or (includetemp and tempsignature is None):
-        signature = hex(hashlib.sha1(repo.vfs.tryread(b'sparse')).digest())
+        signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
         cache[b'signature'] = signature
         if includetemp:
             raw = repo.vfs.tryread(b'tempsparse')
-            tempsignature = hex(hashlib.sha1(raw).digest())
+            tempsignature = hex(hashutil.sha1(raw).digest())
             cache[b'tempsignature'] = tempsignature
     return b'%s %s' % (signature, tempsignature)
 def writeconfig(repo, includes, excludes, profiles):
     """Write the sparse config file given a sparse configuration."""
     with repo.vfs(b'sparse', b'wb') as fh:
mercurial/store.py

 # store.py - repository store handling for Mercurial
 #
 # Copyright 2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import errno
 import functools
-import hashlib
 import os
 import stat
 from .i18n import _
 from .pycompat import getattr
 from . import (
     changelog,
     error,
     manifest,
     node,
     policy,
     pycompat,
     util,
     vfs as vfsmod,
 )
+from .utils import hashutil
 parsers = policy.importmod('parsers')
 # how much bytes should be read from fncache in one read
 # It is done to prevent loading large fncache files into memory
 fncache_chunksize = 10 ** 6
 def _matchtrackedpath(path, matcher):
 _maxstorepathlen = 120
 _dirprefixlen = 8
 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
 def _hashencode(path, dotencode):
-    digest = node.hex(hashlib.sha1(path).digest())
+    digest = node.hex(hashutil.sha1(path).digest())
     le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
     parts = _auxencode(le, dotencode)
     basename = parts[-1]
     _root, ext = os.path.splitext(basename)
     sdirs = []
     sdirslen = 0
     for p in parts[:-1]:
         d = p[:_dirprefixlen]
mercurial/subrepo.py

 # subrepo.py - sub-repository classes and factory
 #
 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import copy
 import errno
-import hashlib
 import os
 import re
 import stat
 import subprocess
 import sys
 import tarfile
 import xml.dom.minidom
     pycompat,
     scmutil,
     subrepoutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     dateutil,
+    hashutil,
     procutil,
     stringutil,
 )
 hg = None
 reporelpath = subrepoutil.reporelpath
 subrelpath = subrepoutil.subrelpath
 _abssource = subrepoutil._abssource
 propertycache = util.propertycache
 def _expandedabspath(path):
     '''
     get a path or url and if it is a path expand it and return an absolute path
     '''
     expandedpath = util.urllocalpath(util.expandpath(path))
     u = util.url(expandedpath)
     if not u.scheme:
         path = util.normpath(os.path.abspath(u.path))
     return path
 def _getstorehashcachename(remotepath):
     '''get a unique filename for the store hash cache of a remote repository'''
-    return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12]
+    return node.hex(hashutil.sha1(_expandedabspath(remotepath)).digest())[0:12]
 class SubrepoAbort(error.Abort):
     """Exception class used to avoid handling a subrepo error more than once"""
     def __init__(self, *args, **kw):
         self.subrepo = kw.pop('subrepo', None)
         self.cause = kw.pop('cause', None)
         This method is used to to detect when there are changes that may
         require a push to a given remote path.'''
         # sort the files that will be hashed in increasing (likely) file size
         filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i')
         yield b'# %s\n' % _expandedabspath(remotepath)
         vfs = self._repo.vfs
         for relname in filelist:
-            filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
+            filehash = node.hex(hashutil.sha1(vfs.tryread(relname)).digest())
             yield b'%s = %s\n' % (relname, filehash)
     @propertycache
     def _cachestorehashvfs(self):
         return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash'))
     def _readstorehashcache(self, remotepath):
         '''read the store hash cache for a given remote repository'''
mercurial/util.py

     i18n,
     node as nodemod,
     policy,
     pycompat,
     urllibcompat,
 )
 from .utils import (
     compression,
+    hashutil,
     procutil,
     stringutil,
 )
 base85 = policy.importmod('base85')
 osutil = policy.importmod('osutil')
 b85decode = base85.b85decode
         b"\n(compatibility will be dropped after Mercurial-%s,"
         b" update your code.)"
     ) % version
     warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
 DIGESTS = {
     b'md5': hashlib.md5,
-    b'sha1': hashlib.sha1,
+    b'sha1': hashutil.sha1,
     b'sha512': hashlib.sha512,
 }
 # List of digest types from strongest to weakest
 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
 for k in DIGESTS_BY_STRENGTH:
     assert k in DIGESTS
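Note that `util.py` keeps its `hashlib` import because md5 and sha512 stay on the stdlib; only the sha1 entry is rerouted, which is why this file shows 3 changed lines rather than 4. The strength ordering lets code pick the strongest digest both sides support; a sketch in that spirit (the `strongest` helper name is invented for illustration, not Mercurial's API):

```python
# Pick the strongest mutually supported digest type; `supported` stands
# in for a peer's advertised capability set.
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

def strongest(supported):
    for k in DIGESTS_BY_STRENGTH:
        if k in supported:
            return k
    return None

assert strongest({b'md5', b'sha1'}) == b'sha1'
assert strongest({b'sha512', b'sha1'}) == b'sha512'
```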
mercurial/utils/storageutil.py

 # storageutil.py - Storage functionality agnostic of backend implementation.
 #
 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
-import hashlib
 import re
 import struct
 from ..i18n import _
 from ..node import (
     bin,
     nullid,
     nullrev,
 )
 from .. import (
     dagop,
     error,
     mdiff,
     pycompat,
 )
 from ..interfaces import repository
+from ..utils import hashutil
-_nullhash = hashlib.sha1(nullid)
+_nullhash = hashutil.sha1(nullid)
 def hashrevisionsha1(text, p1, p2):
     """Compute the SHA-1 for revision data and its parents.
     This hash combines both the current file contents and its history
     in a manner that makes it easy to distinguish nodes with the same
     content in the revision graph.
     """
     # As of now, if one of the parent node is null, p2 is null
     if p2 == nullid:
         # deep copy of a hash is faster than creating one
         s = _nullhash.copy()
         s.update(p1)
     else:
         # none of the parent nodes are nullid
         if p1 < p2:
             a = p1
             b = p2
         else:
             a = p2
             b = p1
-        s = hashlib.sha1(a)
+        s = hashutil.sha1(a)
         s.update(b)
     s.update(text)
     return s.digest()
 METADATA_RE = re.compile(b'\x01\n')
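Stripped of the `_nullhash` fast path, the node computation is `sha1(min(p1, p2) + max(p1, p2) + text)`; sorting the parents makes the node independent of parent order. A self-contained restatement (plain `hashlib` again, which `hashutil.sha1` matches here; parent ids are invented):

```python
# hashrevisionsha1 restated: node = sha1(min(p1, p2) + max(p1, p2) + text).
import hashlib

def nodeid(text, p1, p2):
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

p1, p2 = b'\x01' * 20, b'\x02' * 20
# swapping the parents does not change the node:
assert nodeid(b'data', p1, p2) == nodeid(b'data', p2, p1)
```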
mercurial/wireprotov1peer.py

 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
 #
 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
-import hashlib
 import sys
 import weakref
 from .i18n import _
 from .node import bin
 from .pycompat import (
     getattr,
     setattr,
 )
 from . import (
     bundle2,
     changegroup as changegroupmod,
     encoding,
     error,
     pushkey as pushkeymod,
     pycompat,
     util,
     wireprototypes,
 )
 from .interfaces import (
     repository,
     util as interfaceutil,
 )
+from .utils import hashutil
 urlreq = util.urlreq
 def batchable(f):
     '''annotation for batchable methods
     Such methods must implement a coroutine as follows:
         When pushing a bundle20 stream, return a bundle20 stream.
         `url` is the url the client thinks it's pushing to, which is
         visible to hooks.
         '''
         if heads != [b'force'] and self.capable(b'unbundlehash'):
             heads = wireprototypes.encodelist(
-                [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]
+                [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()]
             )
         else:
             heads = wireprototypes.encodelist(heads)
         if util.safehasattr(bundle, b'deltaheader'):
             # this a bundle10, do the old style call sequence
             ret, output = self._callpush(b"unbundle", bundle, heads=heads)
             if ret == b"":
mercurial/wireprotov2server.py

 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import
 import collections
 import contextlib
-import hashlib
 from .i18n import _
 from .node import (
     hex,
     nullid,
 )
 from . import (
     discovery,
     encoding,
     error,
     match as matchmod,
     narrowspec,
     pycompat,
     streamclone,
     templatefilters,
     util,
     wireprotoframing,
     wireprototypes,
 )
 from .interfaces import util as interfaceutil
 from .utils import (
     cborutil,
+    hashutil,
     stringutil,
 )
 FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
 COMMANDS = wireprototypes.commanddict()
         # Arguments by their very nature must support being encoded to CBOR.
         # And the CBOR encoder is deterministic. So we hash the arguments
         # by feeding the CBOR of their representation into the hasher.
         if allargs:
             state[b'args'] = pycompat.byteskwargs(args)
         cacher.adjustcachekeystate(state)
-        hasher = hashlib.sha1()
+        hasher = hashutil.sha1()
         for chunk in cborutil.streamencode(state):
             hasher.update(chunk)
         return pycompat.sysbytes(hasher.hexdigest())
     return cachekeyfn
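Because the CBOR encoding of the state dict is deterministic, hashing the encoded stream yields a stable cache key for identical requests. A standalone sketch using the same `cborutil` helper; the state dict contents are illustrative, not a real wireproto request:

```python
# Cache key = sha1 over a deterministic CBOR encoding of request state.
from mercurial.utils import cborutil, hashutil

state = {b'command': b'rawstorefiledata', b'args': {b'files': [b'a.txt']}}
hasher = hashutil.sha1()
for chunk in cborutil.streamencode(state):
    hasher.update(chunk)
cachekey = hasher.hexdigest()  # stable across runs and processes
```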