Details

- Reviewers: durin42, pulkit
- Group Reviewers: hg-reviewers
- Commits: rHG4aa72cdf616f: py3: delete b'' prefix from safehasattr arguments

Diff Detail

- Repository: rHG Mercurial
- Lint: Lint Skipped
- Unit: Unit Tests Skipped
 | Path
---|---
M | hgext/absorb.py (2 lines)
M | hgext/bugzilla.py (4 lines)
M | hgext/commitextras.py (2 lines)
M | hgext/fastannotate/commands.py (6 lines)
M | hgext/fsmonitor/watchmanclient.py (2 lines)
M | hgext/journal.py (8 lines)
M | hgext/lfs/wireprotolfsserver.py (2 lines)
M | hgext/narrow/narrowbundle2.py (6 lines)
M | hgext/remotefilelog/connectionpool.py (6 lines)
M | hgext/remotefilelog/fileserverclient.py (4 lines)
M | hgext/remotefilelog/repack.py (10 lines)
M | hgext/remotefilelog/shallowrepo.py (2 lines)
M | mercurial/bundle2.py (10 lines)
M | mercurial/bundlerepo.py (2 lines)
M | mercurial/pvec.py (2 lines)
M | mercurial/registrar.py (2 lines)
M | mercurial/utils/procutil.py (4 lines)
Commit | Parents | Author | Summary | Date
---|---|---|---|---
cca70a6c7b16 | 37af48031d6f | Martin von Zweigbergk | py3: delete b'' prefix from safehasattr arguments | Oct 6 2019, 11:17 PM
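
Every hunk below makes the same mechanical change: the attribute name passed to `util.safehasattr()` (or `pycompat.safehasattr()`) loses its `b''` prefix. The helper is roughly a `getattr()` call with a sentinel default, and on Python 3 `getattr()` only accepts native `str` attribute names, so a bytes name raises `TypeError` instead of answering the question. A minimal sketch of the behaviour, assuming a `safehasattr()` equivalent to Mercurial's (the `Repo` class and names below are illustrative stand-ins, not code from this diff):

```python
_notset = object()


def safehasattr(thing, attr):
    # Mirrors mercurial.pycompat.safehasattr: getattr() with a sentinel default.
    return getattr(thing, attr, _notset) is not _notset


class Repo(object):
    def unfiltered(self):
        return self


repo = Repo()
print(safehasattr(repo, 'unfiltered'))   # True: native str attribute names work

# On Python 3 a bytes attribute name is rejected by getattr() itself,
# so the b'' form cannot even return False -- it raises.
try:
    safehasattr(repo, b'unfiltered')
except TypeError as exc:
    print('TypeError:', exc)
```
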
hgext/absorb.py
         else:
             ctx = self.repo[self.finalnode]
         dirstate = self.repo.dirstate
         # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
         # be slow. in absorb's case, no need to invalidate fsmonitorstate.
         noop = lambda: 0
         restore = noop
-        if util.safehasattr(dirstate, b'_fsmonitorstate'):
+        if util.safehasattr(dirstate, '_fsmonitorstate'):
             bak = dirstate._fsmonitorstate.invalidate
             def restore():
                 dirstate._fsmonitorstate.invalidate = bak
             dirstate._fsmonitorstate.invalidate = noop
         try:
             with dirstate.parentchange():
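
The absorb hunk above temporarily swaps `dirstate._fsmonitorstate.invalidate` for a no-op and restores it afterwards, so the fsmonitor state is not thrown away needlessly. A generic save/patch/restore sketch of that pattern (hypothetical class and names, not Mercurial code):

```python
class FSMonitorState(object):
    def invalidate(self):
        print('expensive cache invalidation')


state = FSMonitorState()
noop = lambda: 0

bak = state.invalidate      # keep a reference to the original bound method
state.invalidate = noop     # shadow it with a no-op on the instance
try:
    state.invalidate()      # does nothing while patched
finally:
    state.invalidate = bak  # restore the original behaviour

state.invalidate()          # 'expensive cache invalidation' again
```
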
hgext/bugzilla.py
 # The explicit calls to the underlying xmlrpclib __init__() methods are
 # necessary. The xmlrpclib.Transport classes are old-style classes, and
 # it turns out their __init__() doesn't get called when doing multiple
 # inheritance with a new-style class.
 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
     def __init__(self, use_datetime=0):
-        if util.safehasattr(xmlrpclib.Transport, b"__init__"):
+        if util.safehasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.Transport.__init__(self, use_datetime)
 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
     def __init__(self, use_datetime=0):
-        if util.safehasattr(xmlrpclib.Transport, b"__init__"):
+        if util.safehasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.SafeTransport.__init__(self, use_datetime)
 class bzxmlrpc(bzaccess):
     """Support for access to Bugzilla via the Bugzilla XMLRPC API.
     Requires a minimum Bugzilla version 3.4.
     """
hgext/commitextras.py
             [],
             _(b'set a changeset\'s extra values'),
             _(b"KEY=VALUE"),
         )
     )
 def _commit(orig, ui, repo, *pats, **opts):
-    if util.safehasattr(repo, b'unfiltered'):
+    if util.safehasattr(repo, 'unfiltered'):
         repo = repo.unfiltered()
     class repoextra(repo.__class__):
         def commit(self, *innerpats, **inneropts):
             extras = opts.get(r'extra')
             for raw in extras:
                 if b'=' not in raw:
                     msg = _(
hgext/fastannotate/commands.py
     # find the head of the main (master) branch
     master = ui.config(b'fastannotate', b'mainbranch') or rev
     # paths will be used for prefetching and the real annotating
     paths = list(_matchpaths(repo, rev, pats, opts, aopts))
     # for client, prefetch from the server
-    if util.safehasattr(repo, b'prefetchfastannotate'):
+    if util.safehasattr(repo, 'prefetchfastannotate'):
         repo.prefetchfastannotate(paths)
     for path in paths:
         result = lines = existinglines = None
         while True:
             try:
                 with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
                     result = a.annotate(
         repo = repo.unfiltered()
     # treat the file as text (skip the isbinary check)
     if ui.configbool(b'fastannotate', b'forcetext'):
         opts[r'text'] = True
     # check if we need to do prefetch (client-side)
     rev = opts.get(r'rev')
-    if util.safehasattr(repo, b'prefetchfastannotate') and rev is not None:
+    if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
         paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
         repo.prefetchfastannotate(paths)
     return orig(ui, repo, *pats, **opts)
 def registercommand():
     """register the fastannotate command"""
             _(b'you need to provide a revision'),
             hint=_(b'set fastannotate.mainbranch or use --rev'),
         )
     if ui.configbool(b'fastannotate', b'unfilteredrepo'):
         repo = repo.unfiltered()
     ctx = scmutil.revsingle(repo, rev)
     m = scmutil.match(ctx, pats, opts)
     paths = list(ctx.walk(m))
-    if util.safehasattr(repo, b'prefetchfastannotate'):
+    if util.safehasattr(repo, 'prefetchfastannotate'):
         # client
         if opts.get(b'REV'):
             raise error.Abort(_(b'--rev cannot be used for client'))
         repo.prefetchfastannotate(paths)
     else:
         # server, or full repo
         progress = ui.makeprogress(_(b'building'), total=len(paths))
         for i, path in enumerate(paths):
hgext/fsmonitor/watchmanclient.py
     def settimeout(self, timeout):
         self._timeout = timeout
         if self._watchmanclient is not None:
             self._watchmanclient.setTimeout(timeout)
     def getcurrentclock(self):
         result = self.command(b'clock')
-        if not util.safehasattr(result, b'clock'):
+        if not util.safehasattr(result, 'clock'):
             raise Unavailable(
                 b'clock result is missing clock value', invalidate=True
             )
         return result.clock
     def clearconnection(self):
         self._watchmanclient = None
hgext/journal.py
     dirstate.journalstorage = repo.journal
     dirstate.addparentchangecallback(b'journal', recorddirstateparents)
 # hooks to record dirstate changes
 def wrapdirstate(orig, repo):
     """Make journal storage available to the dirstate object"""
     dirstate = orig(repo)
-    if util.safehasattr(repo, b'journal'):
+    if util.safehasattr(repo, 'journal'):
         _setupdirstate(repo, dirstate)
     return dirstate
 def recorddirstateparents(dirstate, old, new):
     """Records all dirstate parent changes in the journal."""
     old = list(old)
     new = list(new)
-    if util.safehasattr(dirstate, b'journalstorage'):
+    if util.safehasattr(dirstate, 'journalstorage'):
         # only record two hashes if there was a merge
         oldhashes = old[:1] if old[1] == node.nullid else old
         newhashes = new[:1] if new[1] == node.nullid else new
         dirstate.journalstorage.record(
             wdirparenttype, b'.', oldhashes, newhashes
         )
 # hooks to record bookmark changes (both local and remote)
 def recordbookmarks(orig, store, fp):
     """Records all bookmark changes in the journal."""
     repo = store._repo
-    if util.safehasattr(repo, b'journal'):
+    if util.safehasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
         for mark, value in pycompat.iteritems(store):
             oldvalue = oldmarks.get(mark, node.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
     return orig(store, fp)
             fp.write(b'journal\n')
 def unsharejournal(orig, ui, repo, repopath):
     """Copy shared journal entries into this repo when unsharing"""
     if (
         repo.path == repopath
         and repo.shared()
-        and util.safehasattr(repo, b'journal')
+        and util.safehasattr(repo, 'journal')
     ):
         sharedrepo = hg.sharedreposource(repo)
         sharedfeatures = _readsharedfeatures(repo)
         if sharedrepo and sharedfeatures > {b'journal'}:
             # there is a shared repository and there are shared journal entries
             # to copy. move shared date over from source to destination but
             # move the local file first
             if repo.vfs.exists(b'namejournal'):
hgext/lfs/wireprotolfsserver.py
     request if it is left unprocessed by the wrapped method.
     """
     if orig(rctx, req, res, checkperm):
         return True
     if not rctx.repo.ui.configbool(b'experimental', b'lfs.serve'):
         return False
-    if not util.safehasattr(rctx.repo.svfs, b'lfslocalblobstore'):
+    if not util.safehasattr(rctx.repo.svfs, 'lfslocalblobstore'):
         return False
     if not req.dispatchpath:
         return False
     try:
         if req.dispatchpath == b'.git/info/lfs/objects/batch':
             checkperm(rctx, req, b'pull')
hgext/narrow/narrowbundle2.py
         op._widen_uninterr = repo.ui.uninterruptible()
         op._widen_uninterr.__enter__()
         # presence of _widen_bundle attribute activates widen handler later
         op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
-    if util.safehasattr(repo, b'setnewnarrowpats'):
+    if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()
 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     exchange.getbundle2partsmapping[b'changegroup'] = wrappedcgfn
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping[b'changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
-        if util.safehasattr(op, b'_widen_bundle'):
+        if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
-        if util.safehasattr(op, b'_bookmarksbackup'):
+        if util.safehasattr(op, '_bookmarksbackup'):
             localrepo.localrepository._bookmarks.set(
                 op.repo, op._bookmarksbackup
             )
             del op._bookmarksbackup
     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping[b'changegroup'] = wrappedcghandler
hgext/remotefilelog/connectionpool.py
                 pass
         if conn is None:
             def _cleanup(orig):
                 # close pipee first so peer.cleanup reading it won't deadlock,
                 # if there are other processes with pipeo open (i.e. us).
                 peer = orig.im_self
-                if util.safehasattr(peer, b'pipee'):
+                if util.safehasattr(peer, 'pipee'):
                     peer.pipee.close()
                 return orig()
             peer = hg.peer(self._repo.ui, {}, path)
-            if util.safehasattr(peer, b'cleanup'):
+            if util.safehasattr(peer, 'cleanup'):
                 extensions.wrapfunction(peer, b'cleanup', _cleanup)
             conn = connection(pathpool, peer)
         return conn
     def close(self):
         for pathpool in pycompat.itervalues(self._pool):
         # since an exception could mean the connection is not in a reusable
         # state.
         if type is None:
             self._pool.append(self)
         else:
             self.close()
     def close(self):
-        if util.safehasattr(self.peer, b'cleanup'):
+        if util.safehasattr(self.peer, 'cleanup'):
             self.peer.cleanup()
hgext/remotefilelog/fileserverclient.py
         def _updatecallstreamopts(self, command, opts):
             if command != b'getbundle':
                 return
             if (
                 constants.NETWORK_CAP_LEGACY_SSH_GETFILES
                 not in self.capabilities()
             ):
                 return
-            if not util.safehasattr(self, b'_localrepo'):
+            if not util.safehasattr(self, '_localrepo'):
                 return
             if (
                 constants.SHALLOWREPO_REQUIREMENT
                 not in self._localrepo.requirements
             ):
                 return
             bundlecaps = opts.get(b'bundlecaps')
         def _sendrequest(self, command, args, **opts):
             self._updatecallstreamopts(command, args)
             return super(remotefilepeer, self)._sendrequest(
                 command, args, **opts
             )
         def _callstream(self, command, **opts):
             supertype = super(remotefilepeer, self)
-            if not util.safehasattr(supertype, b'_sendrequest'):
+            if not util.safehasattr(supertype, '_sendrequest'):
                 self._updatecallstreamopts(command, pycompat.byteskwargs(opts))
             return super(remotefilepeer, self)._callstream(command, **opts)
     peer.__class__ = remotefilepeer
 class cacheconnection(object):
     """The connection for communicating with the remote cache. Performs
hgext/remotefilelog/repack.py
     repo.ui.warn(msg)
     # We know this command will find a binary, so don't block on it starting.
     procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)
 def fullrepack(repo, options=None):
     """If ``packsonly`` is True, stores creating only loose objects are skipped.
     """
-    if util.safehasattr(repo, b'shareddatastores'):
+    if util.safehasattr(repo, 'shareddatastores'):
         datasource = contentstore.unioncontentstore(*repo.shareddatastores)
         historysource = metadatastore.unionmetadatastore(
             *repo.sharedhistorystores, allowincomplete=True
         )
         packpath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )
         _runrepack(
             repo,
             datasource,
             historysource,
             packpath,
             constants.FILEPACK_CATEGORY,
             options=options,
         )
-    if util.safehasattr(repo.manifestlog, b'datastore'):
+    if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
         # Repack the shared manifest store
         datasource = contentstore.unioncontentstore(*sdstores)
         historysource = metadatastore.unionmetadatastore(
             *shstores, allowincomplete=True
             options=options,
         )
 def incrementalrepack(repo, options=None):
     """This repacks the repo by looking at the distribution of pack files in the
     repo and performing the most minimal repack to keep the repo in good shape.
     """
-    if util.safehasattr(repo, b'shareddatastores'):
+    if util.safehasattr(repo, 'shareddatastores'):
         packpath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )
         _incrementalrepack(
             repo,
             repo.shareddatastores,
             repo.sharedhistorystores,
             packpath,
             constants.FILEPACK_CATEGORY,
             options=options,
         )
-    if util.safehasattr(repo.manifestlog, b'datastore'):
+    if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
         # Repack the shared manifest store
         _incrementalrepack(
             repo,
             sdstores,
         self.datarepacked = False
         # If the revision's history entry was repacked into the repack target
         self.historyrepacked = False
         # If garbage collected
         self.gced = False
 def repacklockvfs(repo):
-    if util.safehasattr(repo, b'name'):
+    if util.safehasattr(repo, 'name'):
         # Lock in the shared cache so repacks across multiple copies of the same
         # repo are coordinated.
         sharedcachepath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )
         return vfs.vfs(sharedcachepath)
     else:
         return repo.svfs
hgext/remotefilelog/shallowrepo.py
     makeunionstores(repo)
     repo.includepattern = repo.ui.configlist(
         b"remotefilelog", b"includepattern", None
     )
     repo.excludepattern = repo.ui.configlist(
         b"remotefilelog", b"excludepattern", None
     )
-    if not util.safehasattr(repo, b'connectionpool'):
+    if not util.safehasattr(repo, 'connectionpool'):
         repo.connectionpool = connectionpool.connectionpool(repo)
     if repo.includepattern or repo.excludepattern:
         repo.shallowmatch = match.match(
             repo.root, b'', None, repo.includepattern, repo.excludepattern
         )
mercurial/bundle2.py
             return None
     def compressed(self):
         self.params  # load params
         return self._compressed
     def close(self):
         """close underlying file"""
-        if util.safehasattr(self._fp, b'close'):
+        if util.safehasattr(self._fp, 'close'):
             return self._fp.close()
 formatmap = {b'20': unbundle20}
 b2streamparamsmap = {}
             self.mandatory,
         )
     def copy(self):
         """return a copy of the part
         The new part have the very same content but no partid assigned yet.
         Parts with generated data cannot be copied."""
-        assert not util.safehasattr(self.data, b'next')
+        assert not util.safehasattr(self.data, 'next')
         return self.__class__(
             self.type,
             self._mandatoryparams,
             self._advisoryparams,
             self._data,
             self.mandatory,
         )
             msg.append(b' (params:')
             if nbmp:
                 msg.append(b' %i mandatory' % nbmp)
             if nbap:
                 msg.append(b' %i advisory' % nbmp)
             msg.append(b')')
         if not self.data:
             msg.append(b' empty payload')
-        elif util.safehasattr(self.data, b'next') or util.safehasattr(
+        elif util.safehasattr(self.data, 'next') or util.safehasattr(
             self.data, b'__next__'
         ):
             msg.append(b' streamed payload')
         else:
             msg.append(b' %i bytes payload' % len(self.data))
         msg.append(b'\n')
         ui.debug(b''.join(msg))
         self._generated = True
     def _payloadchunks(self):
         """yield chunks of a the part payload
         Exists to handle the different methods to provide data to a part."""
         # we only support fixed size data now.
         # This will be improved in the future.
-        if util.safehasattr(self.data, b'next') or util.safehasattr(
+        if util.safehasattr(self.data, 'next') or util.safehasattr(
             self.data, b'__next__'
         ):
             buff = util.chunkbuffer(self.data)
             chunk = buff.read(preferedchunksize)
             while chunk:
                 yield chunk
                 chunk = buff.read(preferedchunksize)
         elif len(self.data):
             debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
 class unbundlepart(unpackermixin):
     """a bundle part read from a bundle"""
     def __init__(self, ui, header, fp):
         super(unbundlepart, self).__init__(fp)
-        self._seekable = util.safehasattr(fp, b'seek') and util.safehasattr(
+        self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
             fp, b'tell'
         )
         self.ui = ui
         # unbundle state attr
         self._headerdata = header
         self._headeroffset = 0
         self._initialized = False
         self.consumed = False
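
The bundle2 hunks above decide whether a part's payload is streamed or held in memory by probing for the iterator protocol: `next` on Python 2, `__next__` on Python 3. A standalone sketch of that check using plain `hasattr()` (illustrative only; bundle2 itself goes through `util.safehasattr`):

```python
def is_streamed(data):
    # Generators and other iterators expose __next__ (Python 3) or next
    # (Python 2); bytes and other fixed-size payloads do not.
    return hasattr(data, '__next__') or hasattr(data, 'next')


print(is_streamed(b'fixed payload'))              # False: sized, in-memory data
print(is_streamed(iter([b'chunk1', b'chunk2'])))  # True: streamed data
```
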
mercurial/bundlerepo.py
 class bundlepeer(localrepo.localpeer):
     def canpush(self):
         return False
 class bundlephasecache(phases.phasecache):
     def __init__(self, *args, **kwargs):
         super(bundlephasecache, self).__init__(*args, **kwargs)
-        if util.safehasattr(self, b'opener'):
+        if util.safehasattr(self, 'opener'):
             self.opener = vfsmod.readonlyvfs(self.opener)
     def write(self):
         raise NotImplementedError
     def _write(self, fp):
         raise NotImplementedError
mercurial/pvec.py
     # converting bit strings to longs is slow
     bit = (hash(node) & 0xFFFFFFFF) % _vecbits
     return v ^ (1 << bit)
 def ctxpvec(ctx):
     '''construct a pvec for ctx while filling in the cache'''
     r = ctx.repo()
-    if not util.safehasattr(r, b"_pveccache"):
+    if not util.safehasattr(r, "_pveccache"):
         r._pveccache = {}
     pvc = r._pveccache
     if ctx.rev() not in pvc:
         cl = r.changelog
         for n in pycompat.xrange(ctx.rev() + 1):
             if n not in pvc:
                 node = cl.node(n)
                 p1, p2 = cl.parentrevs(n)
mercurial/registrar.py
     def _doregister(self, func, decl, *args, **kwargs):
         name = self._getname(decl)
         if name in self._table:
             msg = b'duplicate registration for name: "%s"' % name
             raise error.ProgrammingError(msg)
-        if func.__doc__ and not util.safehasattr(func, b'_origdoc'):
+        if func.__doc__ and not util.safehasattr(func, '_origdoc'):
             func._origdoc = func.__doc__.strip()
             doc = pycompat.sysbytes(func._origdoc)
             func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
         self._table[name] = func
         self._extrasetup(name, func, *args, **kwargs)
         return func
mercurial/utils/procutil.py
 def mainfrozen():
     """return True if we are a frozen executable.
     The code supports py2exe (most common, Windows only) and tools/freeze
     (portable, not much used).
     """
     return (
-        pycompat.safehasattr(sys, b"frozen")
-        or pycompat.safehasattr(sys, b"importers")  # new py2exe
+        pycompat.safehasattr(sys, "frozen")
+        or pycompat.safehasattr(sys, "importers")  # new py2exe
         or imp.is_frozen(r"__main__")  # old py2exe
     )  # tools/freeze
 _hgexecutable = None
 def hgexecutable():