diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py --- a/mercurial/localrepo.py +++ b/mercurial/localrepo.py @@ -1,3698 +1,3697 @@ # localrepo.py - read/write repository class for mercurial # # Copyright 2005-2007 Matt Mackall # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import errno import functools import os import random import sys import time import weakref from .i18n import _ from .node import ( bin, hex, nullid, nullrev, short, ) from .pycompat import ( delattr, getattr, ) from . import ( bookmarks, branchmap, bundle2, bundlecaches, changegroup, color, commit, context, dirstate, dirstateguard, discovery, encoding, error, exchange, extensions, filelog, hook, lock as lockmod, match as matchmod, mergestate as mergestatemod, mergeutil, namespaces, narrowspec, obsolete, pathutil, phases, pushkey, pycompat, rcutil, repoview, requirements as requirementsmod, revlog, revset, revsetlang, scmutil, sparse, store as storemod, subrepoutil, tags as tagsmod, transaction, txnutil, util, vfs as vfsmod, ) from .interfaces import ( repository, util as interfaceutil, ) from .utils import ( hashutil, procutil, stringutil, ) from .revlogutils import ( concurrency_checker as revlogchecker, constants as revlogconst, ) release = lockmod.release urlerr = util.urlerr urlreq = util.urlreq # set of (path, vfs-location) tuples. vfs-location is: # - 'plain for vfs relative paths # - '' for svfs relative paths _cachedfiles = set() class _basefilecache(scmutil.filecache): """All filecache usage on repo are done for logic that should be unfiltered""" def __get__(self, repo, type=None): if repo is None: return self # proxy to unfiltered __dict__ since filtered repo has no entry unfi = repo.unfiltered() try: return unfi.__dict__[self.sname] except KeyError: pass return super(_basefilecache, self).__get__(unfi, type) def set(self, repo, value): return super(_basefilecache, self).set(repo.unfiltered(), value) class repofilecache(_basefilecache): """filecache for files in .hg but outside of .hg/store""" def __init__(self, *paths): super(repofilecache, self).__init__(*paths) for path in paths: _cachedfiles.add((path, b'plain')) def join(self, obj, fname): return obj.vfs.join(fname) class storecache(_basefilecache): """filecache for files in the store""" def __init__(self, *paths): super(storecache, self).__init__(*paths) for path in paths: _cachedfiles.add((path, b'')) def join(self, obj, fname): return obj.sjoin(fname) class mixedrepostorecache(_basefilecache): """filecache for a mix files in .hg/store and outside""" def __init__(self, *pathsandlocations): # scmutil.filecache only uses the path for passing back into our # join(), so we can safely pass a list of paths and locations super(mixedrepostorecache, self).__init__(*pathsandlocations) _cachedfiles.update(pathsandlocations) def join(self, obj, fnameandlocation): fname, location = fnameandlocation if location == b'plain': return obj.vfs.join(fname) else: if location != b'': raise error.ProgrammingError( b'unexpected location: %s' % location ) return obj.sjoin(fname) def isfilecached(repo, name): """check if a repo has already cached "name" filecache-ed property This returns (cachedobj-or-None, iscached) tuple. 
""" cacheentry = repo.unfiltered()._filecache.get(name, None) if not cacheentry: return None, False return cacheentry.obj, True class unfilteredpropertycache(util.propertycache): """propertycache that apply to unfiltered repo only""" def __get__(self, repo, type=None): unfi = repo.unfiltered() if unfi is repo: return super(unfilteredpropertycache, self).__get__(unfi) return getattr(unfi, self.name) class filteredpropertycache(util.propertycache): """propertycache that must take filtering in account""" def cachevalue(self, obj, value): object.__setattr__(obj, self.name, value) def hasunfilteredcache(repo, name): """check if a repo has an unfilteredpropertycache value for """ return name in vars(repo.unfiltered()) def unfilteredmethod(orig): """decorate method that always need to be run on unfiltered version""" @functools.wraps(orig) def wrapper(repo, *args, **kwargs): return orig(repo.unfiltered(), *args, **kwargs) return wrapper moderncaps = { b'lookup', b'branchmap', b'pushkey', b'known', b'getbundle', b'unbundle', } legacycaps = moderncaps.union({b'changegroupsubset'}) @interfaceutil.implementer(repository.ipeercommandexecutor) class localcommandexecutor(object): def __init__(self, peer): self._peer = peer self._sent = False self._closed = False def __enter__(self): return self def __exit__(self, exctype, excvalue, exctb): self.close() def callcommand(self, command, args): if self._sent: raise error.ProgrammingError( b'callcommand() cannot be used after sendcommands()' ) if self._closed: raise error.ProgrammingError( b'callcommand() cannot be used after close()' ) # We don't need to support anything fancy. Just call the named # method on the peer and return a resolved future. fn = getattr(self._peer, pycompat.sysstr(command)) f = pycompat.futures.Future() try: result = fn(**pycompat.strkwargs(args)) except Exception: pycompat.future_set_exception_info(f, sys.exc_info()[1:]) else: f.set_result(result) return f def sendcommands(self): self._sent = True def close(self): self._closed = True @interfaceutil.implementer(repository.ipeercommands) class localpeer(repository.peer): '''peer for a local repo; reflects only the most recent API''' def __init__(self, repo, caps=None): super(localpeer, self).__init__() if caps is None: caps = moderncaps.copy() self._repo = repo.filtered(b'served') self.ui = repo.ui self._caps = repo._restrictcapabilities(caps) # Begin of _basepeer interface. def url(self): return self._repo.url() def local(self): return self._repo def peer(self): return self def canpush(self): return True def close(self): self._repo.close() # End of _basepeer interface. # Begin of _basewirecommands interface. def branchmap(self): return self._repo.branchmap() def capabilities(self): return self._caps def clonebundles(self): return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE) def debugwireargs(self, one, two, three=None, four=None, five=None): """Used to test argument passing over the wire""" return b"%s %s %s %s %s" % ( one, two, pycompat.bytestr(three), pycompat.bytestr(four), pycompat.bytestr(five), ) def getbundle( self, source, heads=None, common=None, bundlecaps=None, **kwargs ): chunks = exchange.getbundlechunks( self._repo, source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs )[1] cb = util.chunkbuffer(chunks) if exchange.bundle2requested(bundlecaps): # When requesting a bundle2, getbundle returns a stream to make the # wire level function happier. We need to build a proper object # from it in local peer. 
return bundle2.getunbundler(self.ui, cb) else: return changegroup.getunbundler(b'01', cb, None) def heads(self): return self._repo.heads() def known(self, nodes): return self._repo.known(nodes) def listkeys(self, namespace): return self._repo.listkeys(namespace) def lookup(self, key): return self._repo.lookup(key) def pushkey(self, namespace, key, old, new): return self._repo.pushkey(namespace, key, old, new) def stream_out(self): raise error.Abort(_(b'cannot perform stream clone against local peer')) def unbundle(self, bundle, heads, url): """apply a bundle on a repo This function handles the repo locking itself.""" try: try: bundle = exchange.readbundle(self.ui, bundle, None) ret = exchange.unbundle(self._repo, bundle, heads, b'push', url) if util.safehasattr(ret, b'getchunks'): # This is a bundle20 object, turn it into an unbundler. # This little dance should be dropped eventually when the # API is finally improved. stream = util.chunkbuffer(ret.getchunks()) ret = bundle2.getunbundler(self.ui, stream) return ret except Exception as exc: # If the exception contains output salvaged from a bundle2 # reply, we need to make sure it is printed before continuing # to fail. So we build a bundle2 with such output and consume # it directly. # # This is not very elegant but allows a "simple" solution for # issue4594 output = getattr(exc, '_bundle2salvagedoutput', ()) if output: bundler = bundle2.bundle20(self._repo.ui) for out in output: bundler.addpart(out) stream = util.chunkbuffer(bundler.getchunks()) b = bundle2.getunbundler(self.ui, stream) bundle2.processbundle(self._repo, b) raise except error.PushRaced as exc: raise error.ResponseError( _(b'push failed:'), stringutil.forcebytestr(exc) ) # End of _basewirecommands interface. # Begin of peer interface. def commandexecutor(self): return localcommandexecutor(self) # End of peer interface. @interfaceutil.implementer(repository.ipeerlegacycommands) class locallegacypeer(localpeer): """peer extension which implements legacy methods too; used for tests with restricted capabilities""" def __init__(self, repo): super(locallegacypeer, self).__init__(repo, caps=legacycaps) # Begin of baselegacywirecommands interface. def between(self, pairs): return self._repo.between(pairs) def branches(self, nodes): return self._repo.branches(nodes) def changegroup(self, nodes, source): outgoing = discovery.outgoing( self._repo, missingroots=nodes, ancestorsof=self._repo.heads() ) return changegroup.makechangegroup(self._repo, outgoing, b'01', source) def changegroupsubset(self, bases, heads, source): outgoing = discovery.outgoing( self._repo, missingroots=bases, ancestorsof=heads ) return changegroup.makechangegroup(self._repo, outgoing, b'01', source) # End of baselegacywirecommands interface. # Functions receiving (ui, features) that extensions can register to impact # the ability to load repositories with custom requirements. Only # functions defined in loaded extensions are called. # # The function receives a set of requirement strings that the repository # is capable of opening. Functions will typically add elements to the # set to reflect that the extension knows how to handle that requirements. 
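# A minimal, hedged sketch (illustration only, not part of this patch) of how
# an extension typically registers one of these callbacks against the
# ``featuresetupfuncs`` set defined just below.  The requirement string
# ``b'exp-myextension-requirement'`` is hypothetical:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # declare that this extension knows how to open repositories
#         # carrying our custom requirement
#         supported.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only callbacks whose defining module belongs to a loaded extension are
# invoked (see ``gathersupportedrequirements`` later in this file).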
featuresetupfuncs = set() def _getsharedvfs(hgvfs, requirements): """returns the vfs object pointing to root of shared source repo for a shared repository hgvfs is vfs pointing at .hg/ of current repo (shared one) requirements is a set of requirements of current repo (shared one) """ # The ``shared`` or ``relshared`` requirements indicate the # store lives in the path contained in the ``.hg/sharedpath`` file. # This is an absolute path for ``shared`` and relative to # ``.hg/`` for ``relshared``. sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements: sharedpath = hgvfs.join(sharedpath) sharedvfs = vfsmod.vfs(sharedpath, realpath=True) if not sharedvfs.exists(): raise error.RepoError( _(b'.hg/sharedpath points to nonexistent directory %s') % sharedvfs.base ) return sharedvfs def _readrequires(vfs, allowmissing): """reads the require file present at root of this vfs and return a set of requirements If allowmissing is True, we suppress ENOENT if raised""" # requires file contains a newline-delimited list of # features/capabilities the opener (us) must have in order to use # the repository. This file was introduced in Mercurial 0.9.2, # which means very old repositories may not have one. We assume # a missing file translates to no requirements. try: requirements = set(vfs.read(b'requires').splitlines()) except IOError as e: if not (allowmissing and e.errno == errno.ENOENT): raise requirements = set() return requirements def makelocalrepository(baseui, path, intents=None): """Create a local repository object. Given arguments needed to construct a local repository, this function performs various early repository loading functionality (such as reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that the repository can be opened, derives a type suitable for representing that repository, and returns an instance of it. The returned object conforms to the ``repository.completelocalrepository`` interface. The repository type is derived by calling a series of factory functions for each aspect/interface of the final repository. These are defined by ``REPO_INTERFACES``. Each factory function is called to produce a type implementing a specific interface. The cumulative list of returned types will be combined into a new type and that type will be instantiated to represent the local repository. The factory functions each receive various state that may be consulted as part of deriving a type. Extensions should wrap these factory functions to customize repository type creation. Note that an extension's wrapped function may be called even if that extension is not loaded for the repo being constructed. Extensions should check if their ``__name__`` appears in the ``extensionmodulenames`` set passed to the factory function and no-op if not. """ ui = baseui.copy() # Prevent copying repo configuration. ui.copy = baseui.copy # Working directory VFS rooted at repository root. wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) # Main VFS for .hg/ directory. hgpath = wdirvfs.join(b'.hg') hgvfs = vfsmod.vfs(hgpath, cacheaudited=True) # Whether this repository is shared one or not shared = False # If this repository is shared, vfs pointing to shared repo sharedvfs = None # The .hg/ path should exist and should be a directory. All other # cases are errors. if not hgvfs.isdir(): try: hgvfs.stat() except OSError as e: if e.errno != errno.ENOENT: raise except ValueError as e: # Can be raised on Python 3.8 when path is invalid. 
raise error.Abort( _(b'invalid path %s: %s') % (path, pycompat.bytestr(e)) ) raise error.RepoError(_(b'repository %s not found') % path) requirements = _readrequires(hgvfs, True) shared = ( requirementsmod.SHARED_REQUIREMENT in requirements or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements ) storevfs = None if shared: # This is a shared repo sharedvfs = _getsharedvfs(hgvfs, requirements) storevfs = vfsmod.vfs(sharedvfs.join(b'store')) else: storevfs = vfsmod.vfs(hgvfs.join(b'store')) # if .hg/requires contains the sharesafe requirement, it means # there exists a `.hg/store/requires` too and we should read it # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement # is present. We never write SHARESAFE_REQUIREMENT for a repo if store # is not present, refer checkrequirementscompat() for that # # However, if SHARESAFE_REQUIREMENT is not present, it means that the # repository was shared the old way. We check the share source .hg/requires # for SHARESAFE_REQUIREMENT to detect whether the current repository needs # to be reshared hint = _("see `hg help config.format.use-share-safe` for more information") if requirementsmod.SHARESAFE_REQUIREMENT in requirements: if ( shared and requirementsmod.SHARESAFE_REQUIREMENT not in _readrequires(sharedvfs, True) ): mismatch_warn = ui.configbool( b'share', b'safe-mismatch.source-not-safe.warn' ) mismatch_config = ui.config( b'share', b'safe-mismatch.source-not-safe' ) if mismatch_config in ( b'downgrade-allow', b'allow', b'downgrade-abort', ): # prevent cyclic import localrepo -> upgrade -> localrepo from . import upgrade upgrade.downgrade_share_to_non_safe( ui, hgvfs, sharedvfs, requirements, mismatch_config, mismatch_warn, ) elif mismatch_config == b'abort': raise error.Abort( _(b"share source does not support share-safe requirement"), hint=hint, ) else: raise error.Abort( _( b"share-safe mismatch with source.\nUnrecognized" b" value '%s' of `share.safe-mismatch.source-not-safe`" b" set." ) % mismatch_config, hint=hint, ) else: requirements |= _readrequires(storevfs, False) elif shared: sourcerequires = _readrequires(sharedvfs, False) if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires: mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe') mismatch_warn = ui.configbool( b'share', b'safe-mismatch.source-safe.warn' ) if mismatch_config in ( b'upgrade-allow', b'allow', b'upgrade-abort', ): # prevent cyclic import localrepo -> upgrade -> localrepo from . import upgrade upgrade.upgrade_share_to_safe( ui, hgvfs, storevfs, requirements, mismatch_config, mismatch_warn, ) elif mismatch_config == b'abort': raise error.Abort( _( b'version mismatch: source uses share-safe' b' functionality while the current share does not' ), hint=hint, ) else: raise error.Abort( _( b"share-safe mismatch with source.\nUnrecognized" b" value '%s' of `share.safe-mismatch.source-safe` set." ) % mismatch_config, hint=hint, ) # The .hg/hgrc file may load extensions or contain config options # that influence repository construction. Attempt to load it and # process any new extensions that it may have pulled in. if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs): afterhgrcload(ui, wdirvfs, hgvfs, requirements) extensions.loadall(ui) extensions.populateui(ui) # Set of module names of extensions loaded for this repository. extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)} supportedrequirements = gathersupportedrequirements(ui) # We first validate the requirements are known. 
ensurerequirementsrecognized(requirements, supportedrequirements) # Then we validate that the known set is reasonable to use together. ensurerequirementscompatible(ui, requirements) # TODO there are unhandled edge cases related to opening repositories with # shared storage. If storage is shared, we should also test for requirements # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in # that repo, as that repo may load extensions needed to open it. This is a # bit complicated because we don't want the other hgrc to overwrite settings # in this hgrc. # # This bug is somewhat mitigated by the fact that we copy the .hg/requires # file when sharing repos. But if a requirement is added after the share is # performed, thereby introducing a new requirement for the opener, we may # will not see that and could encounter a run-time error interacting with # that shared store since it has an unknown-to-us requirement. # At this point, we know we should be capable of opening the repository. # Now get on with doing that. features = set() # The "store" part of the repository holds versioned data. How it is # accessed is determined by various requirements. If `shared` or # `relshared` requirements are present, this indicates current repository # is a share and store exists in path mentioned in `.hg/sharedpath` if shared: storebasepath = sharedvfs.base cachepath = sharedvfs.join(b'cache') features.add(repository.REPO_FEATURE_SHARED_STORAGE) else: storebasepath = hgvfs.base cachepath = hgvfs.join(b'cache') wcachepath = hgvfs.join(b'wcache') # The store has changed over time and the exact layout is dictated by # requirements. The store interface abstracts differences across all # of them. store = makestore( requirements, storebasepath, lambda base: vfsmod.vfs(base, cacheaudited=True), ) hgvfs.createmode = store.createmode storevfs = store.vfs storevfs.options = resolvestorevfsoptions(ui, requirements, features) # The cache vfs is used to manage cache files. cachevfs = vfsmod.vfs(cachepath, cacheaudited=True) cachevfs.createmode = store.createmode # The cache vfs is used to manage cache files related to the working copy wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True) wcachevfs.createmode = store.createmode # Now resolve the type for the repository object. We do this by repeatedly # calling a factory function to produces types for specific aspects of the # repo's operation. The aggregate returned types are used as base classes # for a dynamically-derived type, which will represent our new repository. bases = [] extrastate = {} for iface, fn in REPO_INTERFACES: # We pass all potentially useful state to give extensions tons of # flexibility. typ = fn()( ui=ui, intents=intents, requirements=requirements, features=features, wdirvfs=wdirvfs, hgvfs=hgvfs, store=store, storevfs=storevfs, storeoptions=storevfs.options, cachevfs=cachevfs, wcachevfs=wcachevfs, extensionmodulenames=extensionmodulenames, extrastate=extrastate, baseclasses=bases, ) if not isinstance(typ, type): raise error.ProgrammingError( b'unable to construct type for %s' % iface ) bases.append(typ) # type() allows you to use characters in type names that wouldn't be # recognized as Python symbols in source code. We abuse that to add # rich information about our constructed repo. 
name = pycompat.sysstr( b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements))) ) cls = type(name, tuple(bases), {}) return cls( baseui=baseui, ui=ui, origroot=path, wdirvfs=wdirvfs, hgvfs=hgvfs, requirements=requirements, supportedrequirements=supportedrequirements, sharedpath=storebasepath, store=store, cachevfs=cachevfs, wcachevfs=wcachevfs, features=features, intents=intents, ) def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None): """Load hgrc files/content into a ui instance. This is called during repository opening to load any additional config files or settings relevant to the current repository. Returns a bool indicating whether any additional configs were loaded. Extensions should monkeypatch this function to modify how per-repo configs are loaded. For example, an extension may wish to pull in configs from alternate files or sources. sharedvfs is vfs object pointing to source repo if the current one is a shared one """ if not rcutil.use_repo_hgrc(): return False ret = False # first load config from shared source if we has to if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs: try: ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base) ret = True except IOError: pass try: ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base) ret = True except IOError: pass try: ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base) ret = True except IOError: pass return ret def afterhgrcload(ui, wdirvfs, hgvfs, requirements): """Perform additional actions after .hg/hgrc is loaded. This function is called during repository loading immediately after the .hg/hgrc file is loaded and before per-repo extensions are loaded. The function can be used to validate configs, automatically add options (including extensions) based on requirements, etc. """ # Map of requirements to list of extensions to load automatically when # requirement is present. autoextensions = { b'git': [b'git'], b'largefiles': [b'largefiles'], b'lfs': [b'lfs'], } for requirement, names in sorted(autoextensions.items()): if requirement not in requirements: continue for name in names: if not ui.hasconfig(b'extensions', name): ui.setconfig(b'extensions', name, b'', source=b'autoload') def gathersupportedrequirements(ui): """Determine the complete set of recognized requirements.""" # Start with all requirements supported by this file. supported = set(localrepository._basesupported) # Execute ``featuresetupfuncs`` entries if they belong to an extension # relevant to this ui instance. modules = {m.__name__ for n, m in extensions.extensions(ui)} for fn in featuresetupfuncs: if fn.__module__ in modules: fn(ui, supported) # Add derived requirements from registered compression engines. for name in util.compengines: engine = util.compengines[name] if engine.available() and engine.revlogheader(): supported.add(b'exp-compression-%s' % name) if engine.name() == b'zstd': supported.add(b'revlog-compression-zstd') return supported def ensurerequirementsrecognized(requirements, supported): """Validate that a set of local requirements is recognized. Receives a set of requirements. Raises an ``error.RepoError`` if there exists any requirement in that set that currently loaded code doesn't recognize. Returns a set of supported requirements. 
""" missing = set() for requirement in requirements: if requirement in supported: continue if not requirement or not requirement[0:1].isalnum(): raise error.RequirementError(_(b'.hg/requires file is corrupt')) missing.add(requirement) if missing: raise error.RequirementError( _(b'repository requires features unknown to this Mercurial: %s') % b' '.join(sorted(missing)), hint=_( b'see https://mercurial-scm.org/wiki/MissingRequirement ' b'for more information' ), ) def ensurerequirementscompatible(ui, requirements): """Validates that a set of recognized requirements is mutually compatible. Some requirements may not be compatible with others or require config options that aren't enabled. This function is called during repository opening to ensure that the set of requirements needed to open a repository is sane and compatible with config options. Extensions can monkeypatch this function to perform additional checking. ``error.RepoError`` should be raised on failure. """ if ( requirementsmod.SPARSE_REQUIREMENT in requirements and not sparse.enabled ): raise error.RepoError( _( b'repository is using sparse feature but ' b'sparse is not enabled; enable the ' b'"sparse" extensions to access' ) ) def makestore(requirements, path, vfstype): """Construct a storage object for a repository.""" if b'store' in requirements: if b'fncache' in requirements: - return storemod.fncachestore( - path, vfstype, b'dotencode' in requirements - ) + dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements + return storemod.fncachestore(path, vfstype, dotencode) return storemod.encodedstore(path, vfstype) return storemod.basicstore(path, vfstype) def resolvestorevfsoptions(ui, requirements, features): """Resolve the options to pass to the store vfs opener. The returned dict is used to influence behavior of the storage layer. """ options = {} if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements: options[b'treemanifest'] = True # experimental config: format.manifestcachesize manifestcachesize = ui.configint(b'format', b'manifestcachesize') if manifestcachesize is not None: options[b'manifestcachesize'] = manifestcachesize # In the absence of another requirement superseding a revlog-related # requirement, we have to assume the repo is using revlog version 0. # This revlog format is super old and we don't bother trying to parse # opener options for it because those options wouldn't do anything # meaningful on such old repos. 
if ( requirementsmod.REVLOGV1_REQUIREMENT in requirements or requirementsmod.REVLOGV2_REQUIREMENT in requirements ): options.update(resolverevlogstorevfsoptions(ui, requirements, features)) else: # explicitly mark repo as using revlogv0 options[b'revlogv0'] = True if requirementsmod.COPIESSDC_REQUIREMENT in requirements: options[b'copies-storage'] = b'changeset-sidedata' else: writecopiesto = ui.config(b'experimental', b'copies.write-to') copiesextramode = (b'changeset-only', b'compatibility') if writecopiesto in copiesextramode: options[b'copies-storage'] = b'extra' return options def resolverevlogstorevfsoptions(ui, requirements, features): """Resolve opener options specific to revlogs.""" options = {} options[b'flagprocessors'] = {} if requirementsmod.REVLOGV1_REQUIREMENT in requirements: options[b'revlogv1'] = True if requirementsmod.REVLOGV2_REQUIREMENT in requirements: options[b'revlogv2'] = True if requirementsmod.GENERALDELTA_REQUIREMENT in requirements: options[b'generaldelta'] = True # experimental config: format.chunkcachesize chunkcachesize = ui.configint(b'format', b'chunkcachesize') if chunkcachesize is not None: options[b'chunkcachesize'] = chunkcachesize deltabothparents = ui.configbool( b'storage', b'revlog.optimize-delta-parent-choice' ) options[b'deltabothparents'] = deltabothparents lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta') lazydeltabase = False if lazydelta: lazydeltabase = ui.configbool( b'storage', b'revlog.reuse-external-delta-parent' ) if lazydeltabase is None: lazydeltabase = not scmutil.gddeltaconfig(ui) options[b'lazydelta'] = lazydelta options[b'lazydeltabase'] = lazydeltabase chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan') if 0 <= chainspan: options[b'maxdeltachainspan'] = chainspan mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold') if mmapindexthreshold is not None: options[b'mmapindexthreshold'] = mmapindexthreshold withsparseread = ui.configbool(b'experimental', b'sparse-read') srdensitythres = float( ui.config(b'experimental', b'sparse-read.density-threshold') ) srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size') options[b'with-sparse-read'] = withsparseread options[b'sparse-read-density-threshold'] = srdensitythres options[b'sparse-read-min-gap-size'] = srmingapsize sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements options[b'sparse-revlog'] = sparserevlog if sparserevlog: options[b'generaldelta'] = True sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements options[b'side-data'] = sidedata maxchainlen = None if sparserevlog: maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH # experimental config: format.maxchainlen maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen) if maxchainlen is not None: options[b'maxchainlen'] = maxchainlen for r in requirements: # we allow multiple compression engine requirement to co-exist because # strickly speaking, revlog seems to support mixed compression style. 
# # The compression used for new entries will be "the last one" prefix = r.startswith if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): options[b'compengine'] = r.split(b'-', 2)[2] options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level') if options[b'zlib.level'] is not None: if not (0 <= options[b'zlib.level'] <= 9): msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d') raise error.Abort(msg % options[b'zlib.level']) options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level') if options[b'zstd.level'] is not None: if not (0 <= options[b'zstd.level'] <= 22): msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d') raise error.Abort(msg % options[b'zstd.level']) if requirementsmod.NARROW_REQUIREMENT in requirements: options[b'enableellipsis'] = True if ui.configbool(b'experimental', b'rust.index'): options[b'rust.index'] = True if requirementsmod.NODEMAP_REQUIREMENT in requirements: slow_path = ui.config( b'storage', b'revlog.persistent-nodemap.slow-path' ) if slow_path not in (b'allow', b'warn', b'abort'): default = ui.config_default( b'storage', b'revlog.persistent-nodemap.slow-path' ) msg = _( b'unknown value for config ' b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n' ) ui.warn(msg % slow_path) if not ui.quiet: ui.warn(_(b'falling back to default value: %s\n') % default) slow_path = default msg = _( b"accessing `persistent-nodemap` repository without associated " b"fast implementation." ) hint = _( b"check `hg help config.format.use-persistent-nodemap` " b"for details" ) if not revlog.HAS_FAST_PERSISTENT_NODEMAP: if slow_path == b'warn': msg = b"warning: " + msg + b'\n' ui.warn(msg) if not ui.quiet: hint = b'(' + hint + b')\n' ui.warn(hint) if slow_path == b'abort': raise error.Abort(msg, hint=hint) options[b'persistent-nodemap'] = True if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'): options[b'persistent-nodemap.mmap'] = True if ui.configbool(b'devel', b'persistent-nodemap'): options[b'devel-force-nodemap'] = True return options def makemain(**kwargs): """Produce a type conforming to ``ilocalrepositorymain``.""" return localrepository @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) class revlogfilestorage(object): """File storage when using revlogs.""" def file(self, path): if path[0] == b'/': path = path[1:] return filelog.filelog(self.svfs, path) @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) class revlognarrowfilestorage(object): """File storage when using revlogs and narrow files.""" def file(self, path): if path[0] == b'/': path = path[1:] return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch) def makefilestorage(requirements, features, **kwargs): """Produce a type conforming to ``ilocalrepositoryfilestorage``.""" features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE) features.add(repository.REPO_FEATURE_STREAM_CLONE) if requirementsmod.NARROW_REQUIREMENT in requirements: return revlognarrowfilestorage else: return revlogfilestorage # List of repository interfaces and factory functions for them. Each # will be called in order during ``makelocalrepository()`` to iteratively # derive the final type for a local repository instance. We capture the # function as a lambda so we don't hold a reference and the module-level # functions can be wrapped. 
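# A minimal, hedged sketch (illustration only, not part of this patch) of how
# an extension can take advantage of the lambda indirection used by
# ``REPO_INTERFACES`` below: the factory is looked up at call time, so a
# wrapper installed on the module-level function is picked up.  The wrapper
# name is hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements, features, **kwargs)
#         # return ``cls`` unchanged, or a subclass mixing in extra behaviour
#         return cls
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)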
REPO_INTERFACES = [ (repository.ilocalrepositorymain, lambda: makemain), (repository.ilocalrepositoryfilestorage, lambda: makefilestorage), ] @interfaceutil.implementer(repository.ilocalrepositorymain) class localrepository(object): """Main class for representing local repositories. All local repositories are instances of this class. Constructed on its own, instances of this class are not usable as repository objects. To obtain a usable repository object, call ``hg.repository()``, ``localrepo.instance()``, or ``localrepo.makelocalrepository()``. The latter is the lowest-level. ``instance()`` adds support for creating new repositories. ``hg.repository()`` adds more extension integration, including calling ``reposetup()``. Generally speaking, ``hg.repository()`` should be used. """ # obsolete experimental requirements: # - manifestv2: An experimental new manifest format that allowed # for stem compression of long paths. Experiment ended up not # being successful (repository sizes went up due to worse delta # chains), and the code was deleted in 4.6. supportedformats = { requirementsmod.REVLOGV1_REQUIREMENT, requirementsmod.GENERALDELTA_REQUIREMENT, requirementsmod.TREEMANIFEST_REQUIREMENT, requirementsmod.COPIESSDC_REQUIREMENT, requirementsmod.REVLOGV2_REQUIREMENT, requirementsmod.SIDEDATA_REQUIREMENT, requirementsmod.SPARSEREVLOG_REQUIREMENT, requirementsmod.NODEMAP_REQUIREMENT, bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT, requirementsmod.SHARESAFE_REQUIREMENT, } _basesupported = supportedformats | { b'store', b'fncache', requirementsmod.SHARED_REQUIREMENT, requirementsmod.RELATIVE_SHARED_REQUIREMENT, - b'dotencode', + requirementsmod.DOTENCODE_REQUIREMENT, requirementsmod.SPARSE_REQUIREMENT, requirementsmod.INTERNAL_PHASE_REQUIREMENT, } # list of prefix for file which can be written without 'wlock' # Extensions should extend this list when needed _wlockfreeprefix = { # We migh consider requiring 'wlock' for the next # two, but pretty much all the existing code assume # wlock is not needed so we keep them excluded for # now. b'hgrc', b'requires', # XXX cache is a complicatged business someone # should investigate this in depth at some point b'cache/', # XXX shouldn't be dirstate covered by the wlock? b'dirstate', # XXX bisect was still a bit too messy at the time # this changeset was introduced. Someone should fix # the remainig bit and drop this line b'bisect.state', } def __init__( self, baseui, ui, origroot, wdirvfs, hgvfs, requirements, supportedrequirements, sharedpath, store, cachevfs, wcachevfs, features, intents=None, ): """Create a new local repository instance. Most callers should use ``hg.repository()``, ``localrepo.instance()``, or ``localrepo.makelocalrepository()`` for obtaining a new repository object. Arguments: baseui ``ui.ui`` instance that ``ui`` argument was based off of. ui ``ui.ui`` instance for use by the repository. origroot ``bytes`` path to working directory root of this repository. wdirvfs ``vfs.vfs`` rooted at the working directory. hgvfs ``vfs.vfs`` rooted at .hg/ requirements ``set`` of bytestrings representing repository opening requirements. supportedrequirements ``set`` of bytestrings representing repository requirements that we know how to open. May be a supetset of ``requirements``. sharedpath ``bytes`` Defining path to storage base directory. Points to a ``.hg/`` directory somewhere. store ``store.basicstore`` (or derived) instance providing access to versioned storage. cachevfs ``vfs.vfs`` used for cache files. 
wcachevfs ``vfs.vfs`` used for cache files related to the working copy. features ``set`` of bytestrings defining features/capabilities of this instance. intents ``set`` of system strings indicating what this repo will be used for. """ self.baseui = baseui self.ui = ui self.origroot = origroot # vfs rooted at working directory. self.wvfs = wdirvfs self.root = wdirvfs.base # vfs rooted at .hg/. Used to access most non-store paths. self.vfs = hgvfs self.path = hgvfs.base self.requirements = requirements self.supported = supportedrequirements self.sharedpath = sharedpath self.store = store self.cachevfs = cachevfs self.wcachevfs = wcachevfs self.features = features self.filtername = None if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( b'devel', b'check-locks' ): self.vfs.audit = self._getvfsward(self.vfs.audit) # A list of callback to shape the phase if no data were found. # Callback are in the form: func(repo, roots) --> processed root. # This list it to be filled by extension during repo setup self._phasedefaults = [] color.setup(self.ui) self.spath = self.store.path self.svfs = self.store.vfs self.sjoin = self.store.join if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( b'devel', b'check-locks' ): if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit) else: # standard vfs self.svfs.audit = self._getsvfsward(self.svfs.audit) self._dirstatevalidatewarned = False self._branchcaches = branchmap.BranchMapCache() self._revbranchcache = None self._filterpats = {} self._datafilters = {} self._transref = self._lockref = self._wlockref = None # A cache for various files under .hg/ that tracks file changes, # (used by the filecache decorator) # # Maps a property name to its util.filecacheentry self._filecache = {} # hold sets of revision to be filtered # should be cleared when something might have changed the filter value: # - new changesets, # - phase change, # - new obsolescence marker, # - working directory parent change, # - bookmark changes self.filteredrevcache = {} # post-dirstate-status hooks self._postdsstatus = [] # generic mapping between names and nodes self.names = namespaces.namespaces() # Key to signature value. self._sparsesignaturecache = {} # Signature to cached matcher instance. 
self._sparsematchercache = {} self._extrafilterid = repoview.extrafilter(ui) self.filecopiesmode = None if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements: self.filecopiesmode = b'changeset-sidedata' def _getvfsward(self, origfunc): """build a ward for self.vfs""" rref = weakref.ref(self) def checkvfs(path, mode=None): ret = origfunc(path, mode=mode) repo = rref() if ( repo is None or not util.safehasattr(repo, b'_wlockref') or not util.safehasattr(repo, b'_lockref') ): return if mode in (None, b'r', b'rb'): return if path.startswith(repo.path): # truncate name relative to the repository (.hg) path = path[len(repo.path) + 1 :] if path.startswith(b'cache/'): msg = b'accessing cache with vfs instead of cachevfs: "%s"' repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs") # path prefixes covered by 'lock' vfs_path_prefixes = ( b'journal.', b'undo.', b'strip-backup/', b'cache/', ) if any(path.startswith(prefix) for prefix in vfs_path_prefixes): if repo._currentlock(repo._lockref) is None: repo.ui.develwarn( b'write with no lock: "%s"' % path, stacklevel=3, config=b'check-locks', ) elif repo._currentlock(repo._wlockref) is None: # rest of vfs files are covered by 'wlock' # # exclude special files for prefix in self._wlockfreeprefix: if path.startswith(prefix): return repo.ui.develwarn( b'write with no wlock: "%s"' % path, stacklevel=3, config=b'check-locks', ) return ret return checkvfs def _getsvfsward(self, origfunc): """build a ward for self.svfs""" rref = weakref.ref(self) def checksvfs(path, mode=None): ret = origfunc(path, mode=mode) repo = rref() if repo is None or not util.safehasattr(repo, b'_lockref'): return if mode in (None, b'r', b'rb'): return if path.startswith(repo.sharedpath): # truncate name relative to the repository (.hg) path = path[len(repo.sharedpath) + 1 :] if repo._currentlock(repo._lockref) is None: repo.ui.develwarn( b'write with no lock: "%s"' % path, stacklevel=4 ) return ret return checksvfs def close(self): self._writecaches() def _writecaches(self): if self._revbranchcache: self._revbranchcache.write() def _restrictcapabilities(self, caps): if self.ui.configbool(b'experimental', b'bundle2-advertise'): caps = set(caps) capsblob = bundle2.encodecaps( bundle2.getrepocaps(self, role=b'client') ) caps.add(b'bundle2=' + urlreq.quote(capsblob)) return caps # Don't cache auditor/nofsauditor, or you'll end up with reference cycle: # self -> auditor -> self._checknested -> self @property def auditor(self): # This is only used by context.workingctx.match in order to # detect files in subrepos. return pathutil.pathauditor(self.root, callback=self._checknested) @property def nofsauditor(self): # This is only used by context.basectx.match in order to detect # files in subrepos. return pathutil.pathauditor( self.root, callback=self._checknested, realfs=False, cached=True ) def _checknested(self, path): """Determine if path is a legal nested repository.""" if not path.startswith(self.root): return False subpath = path[len(self.root) + 1 :] normsubpath = util.pconvert(subpath) # XXX: Checking against the current working copy is wrong in # the sense that it can reject things like # # $ hg cat -r 10 sub/x.txt # # if sub/ is no longer a subrepository in the working copy # parent revision. # # However, it can of course also allow things that would have # been rejected before, such as the above cat command if sub/ # is a subrepository now, but was a normal directory before. 
# The old path auditor would have rejected by mistake since it # panics when it sees sub/.hg/. # # All in all, checking against the working copy seems sensible # since we want to prevent access to nested repositories on # the filesystem *now*. ctx = self[None] parts = util.splitpath(subpath) while parts: prefix = b'/'.join(parts) if prefix in ctx.substate: if prefix == normsubpath: return True else: sub = ctx.sub(prefix) return sub.checknested(subpath[len(prefix) + 1 :]) else: parts.pop() return False def peer(self): return localpeer(self) # not cached to avoid reference cycle def unfiltered(self): """Return unfiltered version of the repository Intended to be overwritten by filtered repo.""" return self def filtered(self, name, visibilityexceptions=None): """Return a filtered version of a repository The `name` parameter is the identifier of the requested view. This will return a repoview object set "exactly" to the specified view. This function does not apply recursive filtering to a repository. For example calling `repo.filtered("served")` will return a repoview using the "served" view, regardless of the initial view used by `repo`. In other word, there is always only one level of `repoview` "filtering". """ if self._extrafilterid is not None and b'%' not in name: name = name + b'%' + self._extrafilterid cls = repoview.newtype(self.unfiltered().__class__) return cls(self, name, visibilityexceptions) @mixedrepostorecache( (b'bookmarks', b'plain'), (b'bookmarks.current', b'plain'), (b'bookmarks', b''), (b'00changelog.i', b''), ) def _bookmarks(self): # Since the multiple files involved in the transaction cannot be # written atomically (with current repository format), there is a race # condition here. # # 1) changelog content A is read # 2) outside transaction update changelog to content B # 3) outside transaction update bookmark file referring to content B # 4) bookmarks file content is read and filtered against changelog-A # # When this happens, bookmarks against nodes missing from A are dropped. # # Having this happening during read is not great, but it become worse # when this happen during write because the bookmarks to the "unknown" # nodes will be dropped for good. However, writes happen within locks. # This locking makes it possible to have a race free consistent read. # For this purpose data read from disc before locking are # "invalidated" right after the locks are taken. This invalidations are # "light", the `filecache` mechanism keep the data in memory and will # reuse them if the underlying files did not changed. Not parsing the # same data multiple times helps performances. # # Unfortunately in the case describe above, the files tracked by the # bookmarks file cache might not have changed, but the in-memory # content is still "wrong" because we used an older changelog content # to process the on-disk data. So after locking, the changelog would be # refreshed but `_bookmarks` would be preserved. # Adding `00changelog.i` to the list of tracked file is not # enough, because at the time we build the content for `_bookmarks` in # (4), the changelog file has already diverged from the content used # for loading `changelog` in (1) # # To prevent the issue, we force the changelog to be explicitly # reloaded while computing `_bookmarks`. The data race can still happen # without the lock (with a narrower window), but it would no longer go # undetected during the lock time refresh. 
# # The new schedule is as follow # # 1) filecache logic detect that `_bookmarks` needs to be computed # 2) cachestat for `bookmarks` and `changelog` are captured (for book) # 3) We force `changelog` filecache to be tested # 4) cachestat for `changelog` are captured (for changelog) # 5) `_bookmarks` is computed and cached # # The step in (3) ensure we have a changelog at least as recent as the # cache stat computed in (1). As a result at locking time: # * if the changelog did not changed since (1) -> we can reuse the data # * otherwise -> the bookmarks get refreshed. self._refreshchangelog() return bookmarks.bmstore(self) def _refreshchangelog(self): """make sure the in memory changelog match the on-disk one""" if 'changelog' in vars(self) and self.currenttransaction() is None: del self.changelog @property def _activebookmark(self): return self._bookmarks.active # _phasesets depend on changelog. what we need is to call # _phasecache.invalidate() if '00changelog.i' was changed, but it # can't be easily expressed in filecache mechanism. @storecache(b'phaseroots', b'00changelog.i') def _phasecache(self): return phases.phasecache(self, self._phasedefaults) @storecache(b'obsstore') def obsstore(self): return obsolete.makestore(self.ui, self) @storecache(b'00changelog.i') def changelog(self): # load dirstate before changelog to avoid race see issue6303 self.dirstate.prefetch_parents() return self.store.changelog( txnutil.mayhavepending(self.root), concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'), ) @storecache(b'00manifest.i') def manifestlog(self): return self.store.manifestlog(self, self._storenarrowmatch) @repofilecache(b'dirstate') def dirstate(self): return self._makedirstate() def _makedirstate(self): """Extension point for wrapping the dirstate per-repo.""" sparsematchfn = lambda: sparse.matcher(self) return dirstate.dirstate( self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn ) def _dirstatevalidate(self, node): try: self.changelog.rev(node) return node except error.LookupError: if not self._dirstatevalidatewarned: self._dirstatevalidatewarned = True self.ui.warn( _(b"warning: ignoring unknown working parent %s!\n") % short(node) ) return nullid @storecache(narrowspec.FILENAME) def narrowpats(self): """matcher patterns for this repository's narrowspec A tuple of (includes, excludes). """ return narrowspec.load(self) @storecache(narrowspec.FILENAME) def _storenarrowmatch(self): if requirementsmod.NARROW_REQUIREMENT not in self.requirements: return matchmod.always() include, exclude = self.narrowpats return narrowspec.match(self.root, include=include, exclude=exclude) @storecache(narrowspec.FILENAME) def _narrowmatch(self): if requirementsmod.NARROW_REQUIREMENT not in self.requirements: return matchmod.always() narrowspec.checkworkingcopynarrowspec(self) include, exclude = self.narrowpats return narrowspec.match(self.root, include=include, exclude=exclude) def narrowmatch(self, match=None, includeexact=False): """matcher corresponding the the repo's narrowspec If `match` is given, then that will be intersected with the narrow matcher. If `includeexact` is True, then any exact matches from `match` will be included even if they're outside the narrowspec. 
""" if match: if includeexact and not self._narrowmatch.always(): # do not exclude explicitly-specified paths so that they can # be warned later on em = matchmod.exact(match.files()) nm = matchmod.unionmatcher([self._narrowmatch, em]) return matchmod.intersectmatchers(match, nm) return matchmod.intersectmatchers(match, self._narrowmatch) return self._narrowmatch def setnarrowpats(self, newincludes, newexcludes): narrowspec.save(self, newincludes, newexcludes) self.invalidate(clearfilecache=True) @unfilteredpropertycache def _quick_access_changeid_null(self): return { b'null': (nullrev, nullid), nullrev: (nullrev, nullid), nullid: (nullrev, nullid), } @unfilteredpropertycache def _quick_access_changeid_wc(self): # also fast path access to the working copy parents # however, only do it for filter that ensure wc is visible. quick = self._quick_access_changeid_null.copy() cl = self.unfiltered().changelog for node in self.dirstate.parents(): if node == nullid: continue rev = cl.index.get_rev(node) if rev is None: # unknown working copy parent case: # # skip the fast path and let higher code deal with it continue pair = (rev, node) quick[rev] = pair quick[node] = pair # also add the parents of the parents for r in cl.parentrevs(rev): if r == nullrev: continue n = cl.node(r) pair = (r, n) quick[r] = pair quick[n] = pair p1node = self.dirstate.p1() if p1node != nullid: quick[b'.'] = quick[p1node] return quick @unfilteredmethod def _quick_access_changeid_invalidate(self): if '_quick_access_changeid_wc' in vars(self): del self.__dict__['_quick_access_changeid_wc'] @property def _quick_access_changeid(self): """an helper dictionnary for __getitem__ calls This contains a list of symbol we can recognise right away without further processing. """ if self.filtername in repoview.filter_has_wc: return self._quick_access_changeid_wc return self._quick_access_changeid_null def __getitem__(self, changeid): # dealing with special cases if changeid is None: return context.workingctx(self) if isinstance(changeid, context.basectx): return changeid # dealing with multiple revisions if isinstance(changeid, slice): # wdirrev isn't contiguous so the slice shouldn't include it return [ self[i] for i in pycompat.xrange(*changeid.indices(len(self))) if i not in self.changelog.filteredrevs ] # dealing with some special values quick_access = self._quick_access_changeid.get(changeid) if quick_access is not None: rev, node = quick_access return context.changectx(self, rev, node, maybe_filtered=False) if changeid == b'tip': node = self.changelog.tip() rev = self.changelog.rev(node) return context.changectx(self, rev, node) # dealing with arbitrary values try: if isinstance(changeid, int): node = self.changelog.node(changeid) rev = changeid elif changeid == b'.': # this is a hack to delay/avoid loading obsmarkers # when we know that '.' 
won't be hidden node = self.dirstate.p1() rev = self.unfiltered().changelog.rev(node) elif len(changeid) == 20: try: node = changeid rev = self.changelog.rev(changeid) except error.FilteredLookupError: changeid = hex(changeid) # for the error message raise except LookupError: # check if it might have come from damaged dirstate # # XXX we could avoid the unfiltered if we had a recognizable # exception for filtered changeset access if ( self.local() and changeid in self.unfiltered().dirstate.parents() ): msg = _(b"working directory has unknown parent '%s'!") raise error.Abort(msg % short(changeid)) changeid = hex(changeid) # for the error message raise elif len(changeid) == 40: node = bin(changeid) rev = self.changelog.rev(node) else: raise error.ProgrammingError( b"unsupported changeid '%s' of type %s" % (changeid, pycompat.bytestr(type(changeid))) ) return context.changectx(self, rev, node) except (error.FilteredIndexError, error.FilteredLookupError): raise error.FilteredRepoLookupError( _(b"filtered revision '%s'") % pycompat.bytestr(changeid) ) except (IndexError, LookupError): raise error.RepoLookupError( _(b"unknown revision '%s'") % pycompat.bytestr(changeid) ) except error.WdirUnsupported: return context.workingctx(self) def __contains__(self, changeid): """True if the given changeid exists""" try: self[changeid] return True except error.RepoLookupError: return False def __nonzero__(self): return True __bool__ = __nonzero__ def __len__(self): # no need to pay the cost of repoview.changelog unfi = self.unfiltered() return len(unfi.changelog) def __iter__(self): return iter(self.changelog) def revs(self, expr, *args): """Find revisions matching a revset. The revset is specified as a string ``expr`` that may contain %-formatting to escape certain types. See ``revsetlang.formatspec``. Revset aliases from the configuration are not expanded. To expand user aliases, consider calling ``scmutil.revrange()`` or ``repo.anyrevs([expr], user=True)``. Returns a smartset.abstractsmartset, which is a list-like interface that contains integer revisions. """ tree = revsetlang.spectree(expr, *args) return revset.makematcher(tree)(self) def set(self, expr, *args): """Find revisions matching a revset and emit changectx instances. This is a convenience wrapper around ``revs()`` that iterates the result and is a generator of changectx instances. Revset aliases from the configuration are not expanded. To expand user aliases, consider calling ``scmutil.revrange()``. """ for r in self.revs(expr, *args): yield self[r] def anyrevs(self, specs, user=False, localalias=None): """Find revisions matching one of the given revsets. Revset aliases from the configuration are not expanded by default. To expand user aliases, specify ``user=True``. To provide some local definitions overriding user aliases, set ``localalias`` to ``{name: definitionstring}``. """ if specs == [b'null']: return revset.baseset([nullrev]) if specs == [b'.']: quick_data = self._quick_access_changeid.get(b'.') if quick_data is not None: return revset.baseset([quick_data[0]]) if user: m = revset.matchany( self.ui, specs, lookup=revset.lookupfn(self), localalias=localalias, ) else: m = revset.matchany(None, specs, localalias=localalias) return m(self) def url(self): return b'file:' + self.root def hook(self, name, throw=False, **args): """Call a hook, passing this repo instance. This a convenience method to aid invoking hooks. 
Extensions likely won't call this unless they have registered a custom hook or are replacing code that is expected to call a hook. """ return hook.hook(self.ui, self, name, throw, **args) @filteredpropertycache def _tagscache(self): """Returns a tagscache object that contains various tags related caches.""" # This simplifies its cache management by having one decorated # function (this one) and the rest simply fetch things from it. class tagscache(object): def __init__(self): # These two define the set of tags for this repository. tags # maps tag name to node; tagtypes maps tag name to 'global' or # 'local'. (Global tags are defined by .hgtags across all # heads, and local tags are defined in .hg/localtags.) # They constitute the in-memory cache of tags. self.tags = self.tagtypes = None self.nodetagscache = self.tagslist = None cache = tagscache() cache.tags, cache.tagtypes = self._findtags() return cache def tags(self): '''return a mapping of tag to node''' t = {} if self.changelog.filteredrevs: tags, tt = self._findtags() else: tags = self._tagscache.tags rev = self.changelog.rev for k, v in pycompat.iteritems(tags): try: # ignore tags to unknown nodes rev(v) t[k] = v except (error.LookupError, ValueError): pass return t def _findtags(self): """Do the hard work of finding tags. Return a pair of dicts (tags, tagtypes) where tags maps tag name to node, and tagtypes maps tag name to a string like \'global\' or \'local\'. Subclasses or extensions are free to add their own tags, but should be aware that the returned dicts will be retained for the duration of the localrepo object.""" # XXX what tagtype should subclasses/extensions use? Currently # mq and bookmarks add tags, but do not set the tagtype at all. # Should each extension invent its own tag type? Should there # be one tagtype for all such "virtual" tags? Or is the status # quo fine? # map tag name to (node, hist) alltags = tagsmod.findglobaltags(self.ui, self) # map tag name to tag type tagtypes = {tag: b'global' for tag in alltags} tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) # Build the return dicts. Have to re-encode tag names because # the tags module always uses UTF-8 (in order not to lose info # writing to the cache), but the rest of Mercurial wants them in # local encoding. tags = {} for (name, (node, hist)) in pycompat.iteritems(alltags): if node != nullid: tags[encoding.tolocal(name)] = node tags[b'tip'] = self.changelog.tip() tagtypes = { encoding.tolocal(name): value for (name, value) in pycompat.iteritems(tagtypes) } return (tags, tagtypes) def tagtype(self, tagname): """ return the type of the given tag. 
result can be: 'local' : a local tag 'global' : a global tag None : tag does not exist """ return self._tagscache.tagtypes.get(tagname) def tagslist(self): '''return a list of tags ordered by revision''' if not self._tagscache.tagslist: l = [] for t, n in pycompat.iteritems(self.tags()): l.append((self.changelog.rev(n), t, n)) self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)] return self._tagscache.tagslist def nodetags(self, node): '''return the tags associated with a node''' if not self._tagscache.nodetagscache: nodetagscache = {} for t, n in pycompat.iteritems(self._tagscache.tags): nodetagscache.setdefault(n, []).append(t) for tags in pycompat.itervalues(nodetagscache): tags.sort() self._tagscache.nodetagscache = nodetagscache return self._tagscache.nodetagscache.get(node, []) def nodebookmarks(self, node): """return the list of bookmarks pointing to the specified node""" return self._bookmarks.names(node) def branchmap(self): """returns a dictionary {branch: [branchheads]} with branchheads ordered by increasing revision number""" return self._branchcaches[self] @unfilteredmethod def revbranchcache(self): if not self._revbranchcache: self._revbranchcache = branchmap.revbranchcache(self.unfiltered()) return self._revbranchcache def register_changeset(self, rev, changelogrevision): self.revbranchcache().setdata(rev, changelogrevision) def branchtip(self, branch, ignoremissing=False): """return the tip node for a given branch If ignoremissing is True, then this method will not raise an error. This is helpful for callers that only expect None for a missing branch (e.g. namespace). """ try: return self.branchmap().branchtip(branch) except KeyError: if not ignoremissing: raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) else: pass def lookup(self, key): node = scmutil.revsymbol(self, key).node() if node is None: raise error.RepoLookupError(_(b"unknown revision '%s'") % key) return node def lookupbranch(self, key): if self.branchmap().hasbranch(key): return key return scmutil.revsymbol(self, key).branch() def known(self, nodes): cl = self.changelog get_rev = cl.index.get_rev filtered = cl.filteredrevs result = [] for n in nodes: r = get_rev(n) resp = not (r is None or r in filtered) result.append(resp) return result def local(self): return self def publishing(self): # it's safe (and desirable) to trust the publish flag unconditionally # so that we don't finalize changes shared between users via ssh or nfs return self.ui.configbool(b'phases', b'publish', untrusted=True) def cancopy(self): # so statichttprepo's override of local() works if not self.local(): return False if not self.publishing(): return True # if publishing we can't copy if there is filtered content return not self.filtered(b'visible').changelog.filteredrevs def shared(self): '''the type of shared repository (None if not shared)''' if self.sharedpath != self.path: return b'store' return None def wjoin(self, f, *insidef): return self.vfs.reljoin(self.root, f, *insidef) def setparents(self, p1, p2=nullid): self[None].setparents(p1, p2) self._quick_access_changeid_invalidate() def filectx(self, path, changeid=None, fileid=None, changectx=None): """changeid must be a changeset revision, if specified. 
fileid can be a file revision or node.""" return context.filectx( self, path, changeid, fileid, changectx=changectx ) def getcwd(self): return self.dirstate.getcwd() def pathto(self, f, cwd=None): return self.dirstate.pathto(f, cwd) def _loadfilter(self, filter): if filter not in self._filterpats: l = [] for pat, cmd in self.ui.configitems(filter): if cmd == b'!': continue mf = matchmod.match(self.root, b'', [pat]) fn = None params = cmd for name, filterfn in pycompat.iteritems(self._datafilters): if cmd.startswith(name): fn = filterfn params = cmd[len(name) :].lstrip() break if not fn: fn = lambda s, c, **kwargs: procutil.filter(s, c) fn.__name__ = 'commandfilter' # Wrap old filters not supporting keyword arguments if not pycompat.getargspec(fn)[2]: oldfn = fn fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c) fn.__name__ = 'compat-' + oldfn.__name__ l.append((mf, fn, params)) self._filterpats[filter] = l return self._filterpats[filter] def _filter(self, filterpats, filename, data): for mf, fn, cmd in filterpats: if mf(filename): self.ui.debug( b"filtering %s through %s\n" % (filename, cmd or pycompat.sysbytes(fn.__name__)) ) data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) break return data @unfilteredpropertycache def _encodefilterpats(self): return self._loadfilter(b'encode') @unfilteredpropertycache def _decodefilterpats(self): return self._loadfilter(b'decode') def adddatafilter(self, name, filter): self._datafilters[name] = filter def wread(self, filename): if self.wvfs.islink(filename): data = self.wvfs.readlink(filename) else: data = self.wvfs.read(filename) return self._filter(self._encodefilterpats, filename, data) def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs): """write ``data`` into ``filename`` in the working directory This returns length of written (maybe decoded) data. """ data = self._filter(self._decodefilterpats, filename, data) if b'l' in flags: self.wvfs.symlink(data, filename) else: self.wvfs.write( filename, data, backgroundclose=backgroundclose, **kwargs ) if b'x' in flags: self.wvfs.setflags(filename, False, True) else: self.wvfs.setflags(filename, False, False) return len(data) def wwritedata(self, filename, data): return self._filter(self._decodefilterpats, filename, data) def currenttransaction(self): """return the current transaction or None if non exists""" if self._transref: tr = self._transref() else: tr = None if tr and tr.running(): return tr return None def transaction(self, desc, report=None): if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool( b'devel', b'check-locks' ): if self._currentlock(self._lockref) is None: raise error.ProgrammingError(b'transaction requires locking') tr = self.currenttransaction() if tr is not None: return tr.nest(name=desc) # abort here if the journal already exists if self.svfs.exists(b"journal"): raise error.RepoError( _(b"abandoned transaction found"), hint=_(b"run 'hg recover' to clean up transaction"), ) idbase = b"%.40f#%f" % (random.random(), time.time()) ha = hex(hashutil.sha1(idbase).digest()) txnid = b'TXN:' + ha self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid) self._writejournal(desc) renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] if report: rp = report else: rp = self.ui.warn vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/ # we must avoid cyclic reference between repo and transaction. 
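# A minimal sketch of the weak-reference pattern relied on below; the names
# are illustrative and only the stdlib ``weakref`` module is assumed:
#
#   import weakref
#   reporef = weakref.ref(repo)   # holding this does not keep the repo alive
#   repo = reporef()              # yields None once the repo has been collected
#   if repo is not None:
#       repo.ui.debug(b'repository still alive\n')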
reporef = weakref.ref(self) # Code to track tag movement # # Since tags are all handled as file content, it is actually quite hard # to track these movements from a code perspective. So we fall back to # tracking at the repository level. One could envision tracking changes # to the '.hgtags' file through changegroup application, but that fails to # cope with cases where a transaction exposes new heads without a changegroup # being involved (eg: phase movement). # # For now, we gate the feature behind a flag since this likely comes # with performance impacts. The current code runs more often than needed # and does not use caches as much as it could. The current focus is on # the behavior of the feature so we disable it by default. The flag # will be removed when we are happy with the performance impact. # # Once this feature is no longer experimental, move the following # documentation to the appropriate help section: # # The ``HG_TAG_MOVED`` variable will be set if the transaction touched # tags (new or changed or deleted tags). In addition the details of # these changes are made available in a file at: # ``REPOROOT/.hg/changes/tags.changes``. # Make sure you check for HG_TAG_MOVED before reading that file as it # might exist from a previous transaction even if no tags were touched # in this one. Changes are recorded in a line-based format:: # # <action> <hex-node> <tag-name>\n # # Actions are defined as follows: # "-R": tag is removed, # "+A": tag is added, # "-M": tag is moved (old value), # "+M": tag is moved (new value), tracktags = lambda x: None # experimental config: experimental.hook-track-tags shouldtracktags = self.ui.configbool( b'experimental', b'hook-track-tags' ) if desc != b'strip' and shouldtracktags: oldheads = self.changelog.headrevs() def tracktags(tr2): repo = reporef() oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads) newheads = repo.changelog.headrevs() newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads) # notes: we compare lists here. # As we do it only once, building a set would not be cheaper changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes) if changes: tr2.hookargs[b'tag_moved'] = b'1' with repo.vfs( b'changes/tags.changes', b'w', atomictemp=True ) as changesfile: # note: we do not register the file with the transaction # because we need it to still exist when the transaction # is closed (for txnclose hooks) tagsmod.writediff(changesfile, changes) def validate(tr2): """will run pre-closing hooks""" # XXX the transaction API is a bit lacking here so we take a hacky # path for now # # We cannot add this as a "pending" hook since the 'tr.hookargs' # dict is copied before these run. In addition we need the data # available to in-memory hooks too. # # Moreover, we also need to make sure this runs before txnclose # hooks and there is no "pending" mechanism that would execute # logic only if hooks are about to run. # # Fixing this limitation of the transaction is also needed to track # other families of changes (bookmarks, phases, obsolescence). # # This will have to be fixed before we remove the experimental # gating.
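# As a hedged illustration only: an in-process hook could consume the
# experimental data documented above roughly as follows. The hook name and
# callback are hypothetical; only ``tag_moved`` (exported to shell hooks as
# ``HG_TAG_MOVED``) and the ``.hg/changes/tags.changes`` path come from the
# comment above:
#
#   def txnclose_tags_hook(ui, repo, **kwargs):
#       if kwargs.get('tag_moved'):
#           data = repo.vfs.tryread(b'changes/tags.changes')
#           ui.status(b'%d bytes of tag changes recorded\n' % len(data))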
tracktags(tr2) repo = reporef() singleheadopt = (b'experimental', b'single-head-per-branch') singlehead = repo.ui.configbool(*singleheadopt) if singlehead: singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1] accountclosed = singleheadsub.get( b"account-closed-heads", False ) if singleheadsub.get(b"public-changes-only", False): filtername = b"immutable" else: filtername = b"visible" scmutil.enforcesinglehead( repo, tr2, desc, accountclosed, filtername ) if hook.hashook(repo.ui, b'pretxnclose-bookmark'): for name, (old, new) in sorted( tr.changes[b'bookmarks'].items() ): args = tr.hookargs.copy() args.update(bookmarks.preparehookargs(name, old, new)) repo.hook( b'pretxnclose-bookmark', throw=True, **pycompat.strkwargs(args) ) if hook.hashook(repo.ui, b'pretxnclose-phase'): cl = repo.unfiltered().changelog for revs, (old, new) in tr.changes[b'phases']: for rev in revs: args = tr.hookargs.copy() node = hex(cl.node(rev)) args.update(phases.preparehookargs(node, old, new)) repo.hook( b'pretxnclose-phase', throw=True, **pycompat.strkwargs(args) ) repo.hook( b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs) ) def releasefn(tr, success): repo = reporef() if repo is None: # If the repo has been GC'd (and this release function is being # called from transaction.__del__), there's not much we can do, # so just leave the unfinished transaction there and let the # user run `hg recover`. return if success: # this should be explicitly invoked here, because # in-memory changes aren't written out at closing # transaction, if tr.addfilegenerator (via # dirstate.write or so) isn't invoked while # transaction running repo.dirstate.write(None) else: # discard all changes (including ones already written # out) in this transaction narrowspec.restorebackup(self, b'journal.narrowspec') narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate') repo.dirstate.restorebackup(None, b'journal.dirstate') repo.invalidate(clearfilecache=True) tr = transaction.transaction( rp, self.svfs, vfsmap, b"journal", b"undo", aftertrans(renames), self.store.createmode, validator=validate, releasefn=releasefn, checkambigfiles=_cachedfiles, name=desc, ) tr.changes[b'origrepolen'] = len(self) tr.changes[b'obsmarkers'] = set() tr.changes[b'phases'] = [] tr.changes[b'bookmarks'] = {} tr.hookargs[b'txnid'] = txnid tr.hookargs[b'txnname'] = desc tr.hookargs[b'changes'] = tr.changes # note: writing the fncache only during finalize mean that the file is # outdated when running hooks. As fncache is used for streaming clone, # this is not expected to break anything that happen during the hooks. tr.addfinalize(b'flush-fncache', self.store.write) def txnclosehook(tr2): """To be run if transaction is successful, will schedule a hook run""" # Don't reference tr2 in hook() so we don't hold a reference. # This reduces memory consumption when there are multiple # transactions per lock. This can likely go away if issue5045 # fixes the function accumulation. 
hookargs = tr2.hookargs def hookfunc(unused_success): repo = reporef() if hook.hashook(repo.ui, b'txnclose-bookmark'): bmchanges = sorted(tr.changes[b'bookmarks'].items()) for name, (old, new) in bmchanges: args = tr.hookargs.copy() args.update(bookmarks.preparehookargs(name, old, new)) repo.hook( b'txnclose-bookmark', throw=False, **pycompat.strkwargs(args) ) if hook.hashook(repo.ui, b'txnclose-phase'): cl = repo.unfiltered().changelog phasemv = sorted( tr.changes[b'phases'], key=lambda r: r[0][0] ) for revs, (old, new) in phasemv: for rev in revs: args = tr.hookargs.copy() node = hex(cl.node(rev)) args.update(phases.preparehookargs(node, old, new)) repo.hook( b'txnclose-phase', throw=False, **pycompat.strkwargs(args) ) repo.hook( b'txnclose', throw=False, **pycompat.strkwargs(hookargs) ) reporef()._afterlock(hookfunc) tr.addfinalize(b'txnclose-hook', txnclosehook) # Include a leading "-" to make it happen before the transaction summary # reports registered via scmutil.registersummarycallback() whose names # are 00-txnreport etc. That way, the caches will be warm when the # callbacks run. tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) def txnaborthook(tr2): """To be run if transaction is aborted""" reporef().hook( b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) ) tr.addabort(b'txnabort-hook', txnaborthook) # avoid eager cache invalidation. in-memory data should be identical # to stored data if transaction has no error. tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) self._transref = weakref.ref(tr) scmutil.registersummarycallback(self, tr, desc) return tr def _journalfiles(self): return ( (self.svfs, b'journal'), (self.svfs, b'journal.narrowspec'), (self.vfs, b'journal.narrowspec.dirstate'), (self.vfs, b'journal.dirstate'), (self.vfs, b'journal.branch'), (self.vfs, b'journal.desc'), (bookmarks.bookmarksvfs(self), b'journal.bookmarks'), (self.svfs, b'journal.phaseroots'), ) def undofiles(self): return [(vfs, undoname(x)) for vfs, x in self._journalfiles()] @unfilteredmethod def _writejournal(self, desc): self.dirstate.savebackup(None, b'journal.dirstate') narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate') narrowspec.savebackup(self, b'journal.narrowspec') self.vfs.write( b"journal.branch", encoding.fromlocal(self.dirstate.branch()) ) self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc)) bookmarksvfs = bookmarks.bookmarksvfs(self) bookmarksvfs.write( b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks") ) self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots")) def recover(self): with self.lock(): if self.svfs.exists(b"journal"): self.ui.status(_(b"rolling back interrupted transaction\n")) vfsmap = { b'': self.svfs, b'plain': self.vfs, } transaction.rollback( self.svfs, vfsmap, b"journal", self.ui.warn, checkambigfiles=_cachedfiles, ) self.invalidate() return True else: self.ui.warn(_(b"no interrupted transaction available\n")) return False def rollback(self, dryrun=False, force=False): wlock = lock = dsguard = None try: wlock = self.wlock() lock = self.lock() if self.svfs.exists(b"undo"): dsguard = dirstateguard.dirstateguard(self, b'rollback') return self._rollback(dryrun, force, dsguard) else: self.ui.warn(_(b"no rollback information available\n")) return 1 finally: release(dsguard, lock, wlock) @unfilteredmethod # Until we get smarter cache management def _rollback(self, dryrun, force, dsguard): ui = self.ui try: args = self.vfs.read(b'undo.desc').splitlines() (oldlen, desc, detail) = 
(int(args[0]), args[1], None) if len(args) >= 3: detail = args[2] oldtip = oldlen - 1 if detail and ui.verbose: msg = _( b'repository tip rolled back to revision %d' b' (undo %s: %s)\n' ) % (oldtip, desc, detail) else: msg = _( b'repository tip rolled back to revision %d (undo %s)\n' ) % (oldtip, desc) except IOError: msg = _(b'rolling back unknown transaction\n') desc = None if not force and self[b'.'] != self[b'tip'] and desc == b'commit': raise error.Abort( _( b'rollback of last commit while not checked out ' b'may lose data' ), hint=_(b'use -f to force'), ) ui.status(msg) if dryrun: return 0 parents = self.dirstate.parents() self.destroying() vfsmap = {b'plain': self.vfs, b'': self.svfs} transaction.rollback( self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles ) bookmarksvfs = bookmarks.bookmarksvfs(self) if bookmarksvfs.exists(b'undo.bookmarks'): bookmarksvfs.rename( b'undo.bookmarks', b'bookmarks', checkambig=True ) if self.svfs.exists(b'undo.phaseroots'): self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True) self.invalidate() has_node = self.changelog.index.has_node parentgone = any(not has_node(p) for p in parents) if parentgone: # prevent dirstateguard from overwriting already restored one dsguard.close() narrowspec.restorebackup(self, b'undo.narrowspec') narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate') self.dirstate.restorebackup(None, b'undo.dirstate') try: branch = self.vfs.read(b'undo.branch') self.dirstate.setbranch(encoding.tolocal(branch)) except IOError: ui.warn( _( b'named branch could not be reset: ' b'current branch is still \'%s\'\n' ) % self.dirstate.branch() ) parents = tuple([p.rev() for p in self[None].parents()]) if len(parents) > 1: ui.status( _( b'working directory now based on ' b'revisions %d and %d\n' ) % parents ) else: ui.status( _(b'working directory now based on revision %d\n') % parents ) mergestatemod.mergestate.clean(self) # TODO: if we know which new heads may result from this rollback, pass # them to destroy(), which will prevent the branchhead cache from being # invalidated. self.destroyed() return 0 def _buildcacheupdater(self, newtransaction): """called during transaction to build the callback updating cache Lives on the repository to help extension who might want to augment this logic. For this purpose, the created transaction is passed to the method. """ # we must avoid cyclic reference between repo and transaction. reporef = weakref.ref(self) def updater(tr): repo = reporef() repo.updatecaches(tr) return updater @unfilteredmethod def updatecaches(self, tr=None, full=False): """warm appropriate caches If this function is called after a transaction closed. The transaction will be available in the 'tr' argument. This can be used to selectively update caches relevant to the changes in that transaction. If 'full' is set, make sure all caches the function knows about have up-to-date data. Even the ones usually loaded more lazily. """ if tr is not None and tr.hookargs.get(b'source') == b'strip': # During strip, many caches are invalid but # later call to `destroyed` will refresh them. 
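# A hedged usage sketch (call sites are illustrative): transaction close
# reaches this method through the updater built by _buildcacheupdater(),
# while maintenance code can warm every known cache explicitly:
#
#   repo.updatecaches(tr)          # selective refresh after a transaction
#   repo.updatecaches(full=True)   # eager, warms even lazily-loaded caches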
return if tr is None or tr.changes[b'origrepolen'] < len(self): # accessing the 'served' branchmap should refresh all the others, self.ui.debug(b'updating the branch cache\n') self.filtered(b'served').branchmap() self.filtered(b'served.hidden').branchmap() if full: unfi = self.unfiltered() self.changelog.update_caches(transaction=tr) self.manifestlog.update_caches(transaction=tr) rbc = unfi.revbranchcache() for r in unfi.changelog: rbc.branchinfo(r) rbc.write() # ensure the working copy parents are in the manifestfulltextcache for ctx in self[b'.'].parents(): ctx.manifest() # accessing the manifest is enough # accessing fnode cache warms the cache tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs()) # accessing tags warm the cache self.tags() self.filtered(b'served').tags() # The `full` arg is documented as updating even the lazily-loaded # caches immediately, so we're forcing a write to cause these caches # to be warmed up even if they haven't explicitly been requested # yet (if they've never been used by hg, they won't ever have been # written, even if they're a subset of another kind of cache that # *has* been used). for filt in repoview.filtertable.keys(): filtered = self.filtered(filt) filtered.branchmap().write(filtered) def invalidatecaches(self): if '_tagscache' in vars(self): # can't use delattr on proxy del self.__dict__['_tagscache'] self._branchcaches.clear() self.invalidatevolatilesets() self._sparsesignaturecache.clear() def invalidatevolatilesets(self): self.filteredrevcache.clear() obsolete.clearobscaches(self) self._quick_access_changeid_invalidate() def invalidatedirstate(self): """Invalidates the dirstate, causing the next call to dirstate to check if it was modified since the last time it was read, rereading it if it has. This is different to dirstate.invalidate() that it doesn't always rereads the dirstate. Use dirstate.invalidate() if you want to explicitly read the dirstate again (i.e. restoring it to a previous known good state).""" if hasunfilteredcache(self, 'dirstate'): for k in self.dirstate._filecache: try: delattr(self.dirstate, k) except AttributeError: pass delattr(self.unfiltered(), 'dirstate') def invalidate(self, clearfilecache=False): """Invalidates both store and non-store parts other than dirstate If a transaction is running, invalidation of store is omitted, because discarding in-memory changes might cause inconsistency (e.g. incomplete fncache causes unintentional failure, but redundant one doesn't). """ unfiltered = self.unfiltered() # all file caches are stored unfiltered for k in list(self._filecache.keys()): # dirstate is invalidated separately in invalidatedirstate() if k == b'dirstate': continue if ( k == b'changelog' and self.currenttransaction() and self.changelog._delayed ): # The changelog object may store unwritten revisions. We don't # want to lose them. # TODO: Solve the problem instead of working around it. continue if clearfilecache: del self._filecache[k] try: delattr(unfiltered, k) except AttributeError: pass self.invalidatecaches() if not self.currenttransaction(): # TODO: Changing contents of store outside transaction # causes inconsistency. We should make in-memory store # changes detectable, and abort if changed. 
self.store.invalidatecaches() def invalidateall(self): """Fully invalidates both store and non-store parts, causing the subsequent operation to reread any outside changes.""" # extension should hook this to invalidate its caches self.invalidate() self.invalidatedirstate() @unfilteredmethod def _refreshfilecachestats(self, tr): """Reload stats of cached files so that they are flagged as valid""" for k, ce in self._filecache.items(): k = pycompat.sysstr(k) if k == 'dirstate' or k not in self.__dict__: continue ce.refresh() def _lock( self, vfs, lockname, wait, releasefn, acquirefn, desc, ): timeout = 0 warntimeout = 0 if wait: timeout = self.ui.configint(b"ui", b"timeout") warntimeout = self.ui.configint(b"ui", b"timeout.warn") # internal config: ui.signal-safe-lock signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock') l = lockmod.trylock( self.ui, vfs, lockname, timeout, warntimeout, releasefn=releasefn, acquirefn=acquirefn, desc=desc, signalsafe=signalsafe, ) return l def _afterlock(self, callback): """add a callback to be run when the repository is fully unlocked The callback will be executed when the outermost lock is released (with wlock being higher level than 'lock').""" for ref in (self._wlockref, self._lockref): l = ref and ref() if l and l.held: l.postrelease.append(callback) break else: # no lock have been found. callback(True) def lock(self, wait=True): """Lock the repository store (.hg/store) and return a weak reference to the lock. Use this before modifying the store (e.g. committing or stripping). If you are opening a transaction, get a lock as well.) If both 'lock' and 'wlock' must be acquired, ensure you always acquires 'wlock' first to avoid a dead-lock hazard.""" l = self._currentlock(self._lockref) if l is not None: l.lock() return l l = self._lock( vfs=self.svfs, lockname=b"lock", wait=wait, releasefn=None, acquirefn=self.invalidate, desc=_(b'repository %s') % self.origroot, ) self._lockref = weakref.ref(l) return l def wlock(self, wait=True): """Lock the non-store parts of the repository (everything under .hg except .hg/store) and return a weak reference to the lock. Use this before modifying files in .hg. If both 'lock' and 'wlock' must be acquired, ensure you always acquires 'wlock' first to avoid a dead-lock hazard.""" l = self._wlockref and self._wlockref() if l is not None and l.held: l.lock() return l # We do not need to check for non-waiting lock acquisition. Such # acquisition would not cause dead-lock as they would just fail. 
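# A hedged sketch of the acquisition order documented above ('wlock' before
# 'lock'); taking them the other way around is what the devel warning below
# complains about. The transaction name is illustrative:
#
#   with repo.wlock():
#       with repo.lock():
#           with repo.transaction(b'example'):
#               pass   # modify store and working copy here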
if wait and ( self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(b'devel', b'check-locks') ): if self._currentlock(self._lockref) is not None: self.ui.develwarn(b'"wlock" acquired after "lock"') def unlock(): if self.dirstate.pendingparentchange(): self.dirstate.invalidate() else: self.dirstate.write(None) self._filecache[b'dirstate'].refresh() l = self._lock( self.vfs, b"wlock", wait, unlock, self.invalidatedirstate, _(b'working directory of %s') % self.origroot, ) self._wlockref = weakref.ref(l) return l def _currentlock(self, lockref): """Returns the lock if it's held, or None if it's not.""" if lockref is None: return None l = lockref() if l is None or not l.held: return None return l def currentwlock(self): """Returns the wlock if it's held, or None if it's not.""" return self._currentlock(self._wlockref) def checkcommitpatterns(self, wctx, match, status, fail): """check for commit arguments that aren't committable""" if match.isexact() or match.prefix(): matched = set(status.modified + status.added + status.removed) for f in match.files(): f = self.dirstate.normalize(f) if f == b'.' or f in matched or f in wctx.substate: continue if f in status.deleted: fail(f, _(b'file not found!')) # Is it a directory that exists or used to exist? if self.wvfs.isdir(f) or wctx.p1().hasdir(f): d = f + b'/' for mf in matched: if mf.startswith(d): break else: fail(f, _(b"no match under directory!")) elif f not in self.dirstate: fail(f, _(b"file not tracked!")) @unfilteredmethod def commit( self, text=b"", user=None, date=None, match=None, force=False, editor=None, extra=None, ): """Add a new revision to current repository. Revision information is gathered from the working directory, match can be used to filter the committed files. If editor is supplied, it is called to get a commit message. """ if extra is None: extra = {} def fail(f, msg): raise error.InputError(b'%s: %s' % (f, msg)) if not match: match = matchmod.always() if not force: match.bad = fail # lock() for recent changelog (see issue4368) with self.wlock(), self.lock(): wctx = self[None] merge = len(wctx.parents()) > 1 if not force and merge and not match.always(): raise error.Abort( _( b'cannot partially commit a merge ' b'(do not specify files or patterns)' ) ) status = self.status(match=match, clean=force) if force: status.modified.extend( status.clean ) # mq may commit clean files # check subrepos subs, commitsubs, newstate = subrepoutil.precommit( self.ui, wctx, status, match, force=force ) # make sure all explicit patterns are matched if not force: self.checkcommitpatterns(wctx, match, status, fail) cctx = context.workingcommitctx( self, status, text, user, date, extra ) ms = mergestatemod.mergestate.read(self) mergeutil.checkunresolved(ms) # internal config: ui.allowemptycommit if cctx.isempty() and not self.ui.configbool( b'ui', b'allowemptycommit' ): self.ui.debug(b'nothing to commit, clearing merge state\n') ms.reset() return None if merge and cctx.deleted(): raise error.Abort(_(b"cannot commit merge with missing files")) if editor: cctx._text = editor(self, cctx, subs) edited = text != cctx._text # Save commit message in case this transaction gets rolled back # (e.g. by a pretxncommit hook). Leave the content alone on # the assumption that the user will use the same editor again. 
msgfn = self.savecommitmessage(cctx._text) # commit subs and write new state if subs: uipathfn = scmutil.getuipathfn(self) for s in sorted(commitsubs): sub = wctx.sub(s) self.ui.status( _(b'committing subrepository %s\n') % uipathfn(subrepoutil.subrelpath(sub)) ) sr = sub.commit(cctx._text, user, date) newstate[s] = (newstate[s][0], sr) subrepoutil.writestate(self, newstate) p1, p2 = self.dirstate.parents() hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'') try: self.hook( b"precommit", throw=True, parent1=hookp1, parent2=hookp2 ) with self.transaction(b'commit'): ret = self.commitctx(cctx, True) # update bookmarks, dirstate and mergestate bookmarks.update(self, [p1, p2], ret) cctx.markcommitted(ret) ms.reset() except: # re-raises if edited: self.ui.write( _(b'note: commit message saved in %s\n') % msgfn ) self.ui.write( _( b"note: use 'hg commit --logfile " b".hg/last-message.txt --edit' to reuse it\n" ) ) raise def commithook(unused_success): # hack for command that use a temporary commit (eg: histedit) # temporary commit got stripped before hook release if self.changelog.hasnode(ret): self.hook( b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2 ) self._afterlock(commithook) return ret @unfilteredmethod def commitctx(self, ctx, error=False, origctx=None): return commit.commitctx(self, ctx, error=error, origctx=origctx) @unfilteredmethod def destroying(self): """Inform the repository that nodes are about to be destroyed. Intended for use by strip and rollback, so there's a common place for anything that has to be done before destroying history. This is mostly useful for saving state that is in memory and waiting to be flushed when the current lock is released. Because a call to destroyed is imminent, the repo will be invalidated causing those changes to stay in memory (waiting for the next unlock), or vanish completely. """ # When using the same lock to commit and strip, the phasecache is left # dirty after committing. Then when we strip, the repo is invalidated, # causing those changes to disappear. if '_phasecache' in vars(self): self._phasecache.write() @unfilteredmethod def destroyed(self): """Inform the repository that nodes have been destroyed. Intended for use by strip and rollback, so there's a common place for anything that has to be done after destroying history. """ # When one tries to: # 1) destroy nodes thus calling this method (e.g. strip) # 2) use phasecache somewhere (e.g. commit) # # then 2) will fail because the phasecache contains nodes that were # removed. We can either remove phasecache from the filecache, # causing it to reload next time it is accessed, or simply filter # the removed nodes now and write the updated cache. self._phasecache.filterunknown(self) self._phasecache.write() # refresh all repository caches self.updatecaches() # Ensure the persistent tag cache is updated. Doing it now # means that the tag cache only has to worry about destroyed # heads immediately after a strip/rollback. That in turn # guarantees that "cachetip == currenttip" (comparing both rev # and node) always means no nodes have been added or destroyed. # XXX this is suboptimal when qrefresh'ing: we strip the current # head, refresh the tag cache, then immediately add a new head. # But I think doing it this way is necessary for the "instant # tag cache retrieval" case to work. 
self.invalidate() def status( self, node1=b'.', node2=None, match=None, ignored=False, clean=False, unknown=False, listsubrepos=False, ): '''a convenience method that calls node1.status(node2)''' return self[node1].status( node2, match, ignored, clean, unknown, listsubrepos ) def addpostdsstatus(self, ps): """Add a callback to run within the wlock, at the point at which status fixups happen. On status completion, callback(wctx, status) will be called with the wlock held, unless the dirstate has changed from underneath or the wlock couldn't be grabbed. Callbacks should not capture and use a cached copy of the dirstate -- it might change in the meanwhile. Instead, they should access the dirstate via wctx.repo().dirstate. This list is emptied out after each status run -- extensions should make sure it adds to this list each time dirstate.status is called. Extensions should also make sure they don't call this for statuses that don't involve the dirstate. """ # The list is located here for uniqueness reasons -- it is actually # managed by the workingctx, but that isn't unique per-repo. self._postdsstatus.append(ps) def postdsstatus(self): """Used by workingctx to get the list of post-dirstate-status hooks.""" return self._postdsstatus def clearpostdsstatus(self): """Used by workingctx to clear post-dirstate-status hooks.""" del self._postdsstatus[:] def heads(self, start=None): if start is None: cl = self.changelog headrevs = reversed(cl.headrevs()) return [cl.node(rev) for rev in headrevs] heads = self.changelog.heads(start) # sort the output in rev descending order return sorted(heads, key=self.changelog.rev, reverse=True) def branchheads(self, branch=None, start=None, closed=False): """return a (possibly filtered) list of heads for the given branch Heads are returned in topological order, from newest to oldest. If branch is None, use the dirstate branch. If start is not None, return only heads reachable from start. If closed is True, return heads that are marked as closed as well. """ if branch is None: branch = self[None].branch() branches = self.branchmap() if not branches.hasbranch(branch): return [] # the cache returns heads ordered lowest to highest bheads = list(reversed(branches.branchheads(branch, closed=closed))) if start is not None: # filter out the heads that cannot be reached from startrev fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) bheads = [h for h in bheads if h in fbheads] return bheads def branches(self, nodes): if not nodes: nodes = [self.changelog.tip()] b = [] for n in nodes: t = n while True: p = self.changelog.parents(n) if p[1] != nullid or p[0] == nullid: b.append((t, n, p[0], p[1])) break n = p[0] return b def between(self, pairs): r = [] for top, bottom in pairs: n, l, i = top, [], 0 f = 1 while n != bottom and n != nullid: p = self.changelog.parents(n)[0] if i == f: l.append(n) f = f * 2 n = p i += 1 r.append(l) return r def checkpush(self, pushop): """Extensions can override this function if additional checks have to be performed before pushing, or call it if they override push command. """ @unfilteredpropertycache def prepushoutgoinghooks(self): """Return util.hooks consists of a pushop with repo, remote, outgoing methods, which are called before pushing changesets. 
""" return util.hooks() def pushkey(self, namespace, key, old, new): try: tr = self.currenttransaction() hookargs = {} if tr is not None: hookargs.update(tr.hookargs) hookargs = pycompat.strkwargs(hookargs) hookargs['namespace'] = namespace hookargs['key'] = key hookargs['old'] = old hookargs['new'] = new self.hook(b'prepushkey', throw=True, **hookargs) except error.HookAbort as exc: self.ui.write_err(_(b"pushkey-abort: %s\n") % exc) if exc.hint: self.ui.write_err(_(b"(%s)\n") % exc.hint) return False self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key)) ret = pushkey.push(self, namespace, key, old, new) def runhook(unused_success): self.hook( b'pushkey', namespace=namespace, key=key, old=old, new=new, ret=ret, ) self._afterlock(runhook) return ret def listkeys(self, namespace): self.hook(b'prelistkeys', throw=True, namespace=namespace) self.ui.debug(b'listing keys for "%s"\n' % namespace) values = pushkey.list(self, namespace) self.hook(b'listkeys', namespace=namespace, values=values) return values def debugwireargs(self, one, two, three=None, four=None, five=None): '''used to test argument passing over the wire''' return b"%s %s %s %s %s" % ( one, two, pycompat.bytestr(three), pycompat.bytestr(four), pycompat.bytestr(five), ) def savecommitmessage(self, text): fp = self.vfs(b'last-message.txt', b'wb') try: fp.write(text) finally: fp.close() return self.pathto(fp.name[len(self.root) + 1 :]) # used to avoid circular references so destructors work def aftertrans(files): renamefiles = [tuple(t) for t in files] def a(): for vfs, src, dest in renamefiles: # if src and dest refer to a same file, vfs.rename is a no-op, # leaving both src and dest on disk. delete dest to make sure # the rename couldn't be such a no-op. vfs.tryunlink(dest) try: vfs.rename(src, dest) except OSError: # journal file does not yet exist pass return a def undoname(fn): base, name = os.path.split(fn) assert name.startswith(b'journal') return os.path.join(base, name.replace(b'journal', b'undo', 1)) def instance(ui, path, create, intents=None, createopts=None): localpath = util.urllocalpath(path) if create: createrepository(ui, localpath, createopts=createopts) return makelocalrepository(ui, localpath, intents=intents) def islocal(path): return True def defaultcreateopts(ui, createopts=None): """Populate the default creation options for a repository. A dictionary of explicitly requested creation options can be passed in. Missing keys will be populated. """ createopts = dict(createopts or {}) if b'backend' not in createopts: # experimental config: storage.new-repo-backend createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend') return createopts def newreporequirements(ui, createopts): """Determine the set of requirements for a new local repository. Extensions can wrap this function to specify custom requirements for new repositories. """ # If the repo is being created from a shared repository, we copy # its requirements. if b'sharedrepo' in createopts: requirements = set(createopts[b'sharedrepo'].requirements) if createopts.get(b'sharedrelative'): requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT) else: requirements.add(requirementsmod.SHARED_REQUIREMENT) return requirements if b'backend' not in createopts: raise error.ProgrammingError( b'backend key not present in createopts; ' b'was defaultcreateopts() called?' 
) if createopts[b'backend'] != b'revlogv1': raise error.Abort( _( b'unable to determine repository requirements for ' b'storage backend: %s' ) % createopts[b'backend'] ) requirements = {requirementsmod.REVLOGV1_REQUIREMENT} if ui.configbool(b'format', b'usestore'): requirements.add(b'store') if ui.configbool(b'format', b'usefncache'): requirements.add(b'fncache') if ui.configbool(b'format', b'dotencode'): - requirements.add(b'dotencode') + requirements.add(requirementsmod.DOTENCODE_REQUIREMENT) compengines = ui.configlist(b'format', b'revlog-compression') for compengine in compengines: if compengine in util.compengines: break else: raise error.Abort( _( b'compression engines %s defined by ' b'format.revlog-compression not available' ) % b', '.join(b'"%s"' % e for e in compengines), hint=_( b'run "hg debuginstall" to list available ' b'compression engines' ), ) # zlib is the historical default and doesn't need an explicit requirement. if compengine == b'zstd': requirements.add(b'revlog-compression-zstd') elif compengine != b'zlib': requirements.add(b'exp-compression-%s' % compengine) if scmutil.gdinitconfig(ui): requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT) if ui.configbool(b'format', b'sparse-revlog'): requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT) # experimental config: format.exp-use-side-data if ui.configbool(b'format', b'exp-use-side-data'): requirements.add(requirementsmod.SIDEDATA_REQUIREMENT) # experimental config: format.exp-use-copies-side-data-changeset if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'): requirements.add(requirementsmod.SIDEDATA_REQUIREMENT) requirements.add(requirementsmod.COPIESSDC_REQUIREMENT) if ui.configbool(b'experimental', b'treemanifest'): requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT) revlogv2 = ui.config(b'experimental', b'revlogv2') if revlogv2 == b'enable-unstable-format-and-corrupt-my-data': requirements.remove(requirementsmod.REVLOGV1_REQUIREMENT) # generaldelta is implied by revlogv2. requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT) requirements.add(requirementsmod.REVLOGV2_REQUIREMENT) # experimental config: format.internal-phase if ui.configbool(b'format', b'internal-phase'): requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT) if createopts.get(b'narrowfiles'): requirements.add(requirementsmod.NARROW_REQUIREMENT) if createopts.get(b'lfs'): requirements.add(b'lfs') if ui.configbool(b'format', b'bookmarks-in-store'): requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) if ui.configbool(b'format', b'use-persistent-nodemap'): requirements.add(requirementsmod.NODEMAP_REQUIREMENT) # if share-safe is enabled, let's create the new repository with the new # requirement if ui.configbool(b'format', b'use-share-safe'): requirements.add(requirementsmod.SHARESAFE_REQUIREMENT) return requirements def checkrequirementscompat(ui, requirements): """Checks compatibility of repository requirements enabled and disabled. Returns a set of requirements which needs to be dropped because dependend requirements are not enabled. 
Also warns users about it""" dropped = set() if b'store' not in requirements: if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements: ui.warn( _( b'ignoring enabled \'format.bookmarks-in-store\' config ' b'beacuse it is incompatible with disabled ' b'\'format.usestore\' config\n' ) ) dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT) if ( requirementsmod.SHARED_REQUIREMENT in requirements or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements ): raise error.Abort( _( b"cannot create shared repository as source was created" b" with 'format.usestore' config disabled" ) ) if requirementsmod.SHARESAFE_REQUIREMENT in requirements: ui.warn( _( b"ignoring enabled 'format.use-share-safe' config because " b"it is incompatible with disabled 'format.usestore'" b" config\n" ) ) dropped.add(requirementsmod.SHARESAFE_REQUIREMENT) return dropped def filterknowncreateopts(ui, createopts): """Filters a dict of repo creation options against options that are known. Receives a dict of repo creation options and returns a dict of those options that we don't know how to handle. This function is called as part of repository creation. If the returned dict contains any items, repository creation will not be allowed, as it means there was a request to create a repository with options not recognized by loaded code. Extensions can wrap this function to filter out creation options they know how to handle. """ known = { b'backend', b'lfs', b'narrowfiles', b'sharedrepo', b'sharedrelative', b'shareditems', b'shallowfilestore', } return {k: v for k, v in createopts.items() if k not in known} def createrepository(ui, path, createopts=None): """Create a new repository in a vfs. ``path`` path to the new repo's working directory. ``createopts`` options for the new repository. The following keys for ``createopts`` are recognized: backend The storage backend to use. lfs Repository will be created with ``lfs`` requirement. The lfs extension will automatically be loaded when the repository is accessed. narrowfiles Set up repository to support narrow file storage. sharedrepo Repository object from which storage should be shared. sharedrelative Boolean indicating if the path to the shared repo should be stored as relative. By default, the pointer to the "parent" repo is stored as an absolute path. shareditems Set of items to share to the new repository (in addition to storage). shallowfilestore Indicates that storage for files should be shallow (not all ancestor revisions are known). """ createopts = defaultcreateopts(ui, createopts=createopts) unknownopts = filterknowncreateopts(ui, createopts) if not isinstance(unknownopts, dict): raise error.ProgrammingError( b'filterknowncreateopts() did not return a dict' ) if unknownopts: raise error.Abort( _( b'unable to create repository because of unknown ' b'creation option: %s' ) % b', '.join(sorted(unknownopts)), hint=_(b'is a required extension not loaded?'), ) requirements = newreporequirements(ui, createopts=createopts) requirements -= checkrequirementscompat(ui, requirements) wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True) hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg')) if hgvfs.exists(): raise error.RepoError(_(b'repository %s already exists') % path) if b'sharedrepo' in createopts: sharedpath = createopts[b'sharedrepo'].sharedpath if createopts.get(b'sharedrelative'): try: sharedpath = os.path.relpath(sharedpath, hgvfs.base) except (IOError, ValueError) as e: # ValueError is raised on Windows if the drive letters differ # on each path. 
raise error.Abort( _(b'cannot calculate relative path'), hint=stringutil.forcebytestr(e), ) if not wdirvfs.exists(): wdirvfs.makedirs() hgvfs.makedir(notindexed=True) if b'sharedrepo' not in createopts: hgvfs.mkdir(b'cache') hgvfs.mkdir(b'wcache') if b'store' in requirements and b'sharedrepo' not in createopts: hgvfs.mkdir(b'store') # We create an invalid changelog outside the store so very old # Mercurial versions (which didn't know about the requirements # file) encounter an error on reading the changelog. This # effectively locks out old clients and prevents them from # mucking with a repo in an unknown format. # # The revlog header has version 65535, which won't be recognized by # such old clients. hgvfs.append( b'00changelog.i', b'\0\0\xFF\xFF dummy changelog to prevent using the old repo ' b'layout', ) # Filter the requirements into working copy and store ones wcreq, storereq = scmutil.filterrequirements(requirements) # write working copy ones scmutil.writerequires(hgvfs, wcreq) # If there are store requirements and the current repository # is not a shared one, write stored requirements # For new shared repository, we don't need to write the store # requirements as they are already present in store requires if storereq and b'sharedrepo' not in createopts: storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True) scmutil.writerequires(storevfs, storereq) # Write out file telling readers where to find the shared store. if b'sharedrepo' in createopts: hgvfs.write(b'sharedpath', sharedpath) if createopts.get(b'shareditems'): shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n' hgvfs.write(b'shared', shared) def poisonrepository(repo): """Poison a repository instance so it can no longer be used.""" # Perform any cleanup on the instance. repo.close() # Our strategy is to replace the type of the object with one that # has all attribute lookups result in error. # # But we have to allow the close() method because some constructors # of repos call close() on repo references. class poisonedrepository(object): def __getattribute__(self, item): if item == 'close': return object.__getattribute__(self, item) raise error.ProgrammingError( b'repo instances should not be used after unshare' ) def close(self): pass # We may have a repoview, which intercepts __setattr__. So be sure # we operate at the lowest level possible. object.__setattr__(repo, '__class__', poisonedrepository) diff --git a/mercurial/requirements.py b/mercurial/requirements.py --- a/mercurial/requirements.py +++ b/mercurial/requirements.py @@ -1,79 +1,80 @@ # requirements.py - objects and functions related to repository requirements # # Copyright 2005-2007 Matt Mackall # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import GENERALDELTA_REQUIREMENT = b'generaldelta' +DOTENCODE_REQUIREMENT = b'dotencode' # When narrowing is finalized and no longer subject to format changes, # we should move this to just "narrow" or similar. NARROW_REQUIREMENT = b'narrowhg-experimental' # Enables sparse working directory usage SPARSE_REQUIREMENT = b'exp-sparse' # Enables the internal phase which is used to hide changesets instead # of stripping them INTERNAL_PHASE_REQUIREMENT = b'internal-phase' # Stores manifest in Tree structure TREEMANIFEST_REQUIREMENT = b'treemanifest' REVLOGV1_REQUIREMENT = b'revlogv1' # Increment the sub-version when the revlog v2 format changes to lock out old # clients. 
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1' # A repository with the sparserevlog feature will have delta chains that # can spread over a larger span. Sparse reading cuts these large spans into # pieces, so that each piece isn't too big. # Without the sparserevlog capability, reading from the repository could use # huge amounts of memory, because the whole span would be read at once, # including all the intermediate revisions that aren't pertinent for the chain. # This is why once a repository has enabled sparse-read, it becomes required. SPARSEREVLOG_REQUIREMENT = b'sparserevlog' # A repository with the sidedataflag requirement will allow to store extra # information for revision without altering their original hashes. SIDEDATA_REQUIREMENT = b'exp-sidedata-flag' # A repository with the the copies-sidedata-changeset requirement will store # copies related information in changeset's sidedata. COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset' # The repository use persistent nodemap for the changelog and the manifest. NODEMAP_REQUIREMENT = b'persistent-nodemap' # Denotes that the current repository is a share SHARED_REQUIREMENT = b'shared' # Denotes that current repository is a share and the shared source path is # relative to the current repository root path RELATIVE_SHARED_REQUIREMENT = b'relshared' # A repository with share implemented safely. The repository has different # store and working copy requirements i.e. both `.hg/requires` and # `.hg/store/requires` are present. SHARESAFE_REQUIREMENT = b'share-safe' # List of requirements which are working directory specific # These requirements cannot be shared between repositories if they # share the same store # * sparse is a working directory specific functionality and hence working # directory specific requirement # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which # represents that the current working copy/repository shares store of another # repo. Hence both of them should be stored in working copy # * SHARESAFE_REQUIREMENT needs to be stored in working dir to mark that rest of # the requirements are stored in store's requires WORKING_DIR_REQUIREMENTS = { SPARSE_REQUIREMENT, SHARED_REQUIREMENT, RELATIVE_SHARED_REQUIREMENT, SHARESAFE_REQUIREMENT, } diff --git a/mercurial/upgrade_utils/actions.py b/mercurial/upgrade_utils/actions.py --- a/mercurial/upgrade_utils/actions.py +++ b/mercurial/upgrade_utils/actions.py @@ -1,1011 +1,1011 @@ # upgrade.py - functions for in place upgrade of Mercurial repository # # Copyright (c) 2016-present, Gregory Szorc # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import from ..i18n import _ from .. import ( error, localrepo, requirements, revlog, util, ) from ..utils import compression # list of requirements that request a clone of all revlog if added/removed RECLONES_REQUIREMENTS = { requirements.GENERALDELTA_REQUIREMENT, requirements.SPARSEREVLOG_REQUIREMENT, } def preservedrequirements(repo): return set() FORMAT_VARIANT = b'deficiency' OPTIMISATION = b'optimization' class improvement(object): """Represents an improvement that can be made as part of an upgrade. The following attributes are defined on each instance: name Machine-readable string uniquely identifying this improvement. It will be mapped to an action later in the upgrade process. type Either ``FORMAT_VARIANT`` or ``OPTIMISATION``. A format variant is where we change the storage format. 
Not all format variant changes are an obvious problem. An optimization is an action (sometimes optional) that can be taken to further improve the state of the repository. description Message intended for humans explaining the improvement in more detail, including the implications of it. For ``FORMAT_VARIANT`` types, should be worded in the present tense. For ``OPTIMISATION`` types, should be worded in the future tense. upgrademessage Message intended for humans explaining what an upgrade addressing this issue will do. Should be worded in the future tense. postupgrademessage Message intended for humans which will be shown post an upgrade operation when the improvement will be added postdowngrademessage Message intended for humans which will be shown post an upgrade operation in which this improvement was removed touches_filelogs (bool) Whether this improvement touches filelogs touches_manifests (bool) Whether this improvement touches manifests touches_changelog (bool) Whether this improvement touches changelog touches_requirements (bool) Whether this improvement changes repository requirements """ def __init__(self, name, type, description, upgrademessage): self.name = name self.type = type self.description = description self.upgrademessage = upgrademessage self.postupgrademessage = None self.postdowngrademessage = None # By default for now, we assume every improvement touches # all the things self.touches_filelogs = True self.touches_manifests = True self.touches_changelog = True self.touches_requirements = True def __eq__(self, other): if not isinstance(other, improvement): # This is what python tell use to do return NotImplemented return self.name == other.name def __ne__(self, other): return not (self == other) def __hash__(self): return hash(self.name) allformatvariant = [] def registerformatvariant(cls): allformatvariant.append(cls) return cls class formatvariant(improvement): """an improvement subclass dedicated to repository format""" type = FORMAT_VARIANT ### The following attributes should be defined for each class: # machine-readable string uniquely identifying this improvement. it will be # mapped to an action later in the upgrade process. name = None # message intended for humans explaining the improvement in more detail, # including the implications of it ``FORMAT_VARIANT`` types, should be # worded # in the present tense. description = None # message intended for humans explaining what an upgrade addressing this # issue will do. should be worded in the future tense. upgrademessage = None # value of current Mercurial default for new repository default = None # Message intended for humans which will be shown post an upgrade # operation when the improvement will be added postupgrademessage = None # Message intended for humans which will be shown post an upgrade # operation in which this improvement was removed postdowngrademessage = None # By default for now, we assume every improvement touches all the things touches_filelogs = True touches_manifests = True touches_changelog = True touches_requirements = True def __init__(self): raise NotImplementedError() @staticmethod def fromrepo(repo): """current value of the variant in the repository""" raise NotImplementedError() @staticmethod def fromconfig(repo): """current value of the variant in the configuration""" raise NotImplementedError() class requirementformatvariant(formatvariant): """formatvariant based on a 'requirement' name. Many format variant are controlled by a 'requirement'. 
We define a small subclass to factor the code. """ # the requirement that control this format variant _requirement = None @staticmethod def _newreporequirements(ui): return localrepo.newreporequirements( ui, localrepo.defaultcreateopts(ui) ) @classmethod def fromrepo(cls, repo): assert cls._requirement is not None return cls._requirement in repo.requirements @classmethod def fromconfig(cls, repo): assert cls._requirement is not None return cls._requirement in cls._newreporequirements(repo.ui) @registerformatvariant class fncache(requirementformatvariant): name = b'fncache' _requirement = b'fncache' default = True description = _( b'long and reserved filenames may not work correctly; ' b'repository performance is sub-optimal' ) upgrademessage = _( b'repository will be more resilient to storing ' b'certain paths and performance of certain ' b'operations should be improved' ) @registerformatvariant class dotencode(requirementformatvariant): name = b'dotencode' - _requirement = b'dotencode' + _requirement = requirements.DOTENCODE_REQUIREMENT default = True description = _( b'storage of filenames beginning with a period or ' b'space may not work correctly' ) upgrademessage = _( b'repository will be better able to store files ' b'beginning with a space or period' ) @registerformatvariant class generaldelta(requirementformatvariant): name = b'generaldelta' _requirement = requirements.GENERALDELTA_REQUIREMENT default = True description = _( b'deltas within internal storage are unable to ' b'choose optimal revisions; repository is larger and ' b'slower than it could be; interaction with other ' b'repositories may require extra network and CPU ' b'resources, making "hg push" and "hg pull" slower' ) upgrademessage = _( b'repository storage will be able to create ' b'optimal deltas; new repository data will be ' b'smaller and read times should decrease; ' b'interacting with other repositories using this ' b'storage model should require less network and ' b'CPU resources, making "hg push" and "hg pull" ' b'faster' ) @registerformatvariant class sharesafe(requirementformatvariant): name = b'share-safe' _requirement = requirements.SHARESAFE_REQUIREMENT default = False description = _( b'old shared repositories do not share source repository ' b'requirements and config. This leads to various problems ' b'when the source repository format is upgraded or some new ' b'extensions are enabled.' ) upgrademessage = _( b'Upgrades a repository to share-safe format so that future ' b'shares of this repository share its requirements and configs.' ) postdowngrademessage = _( b'repository downgraded to not use share safe mode, ' b'existing shares will not work and needs to' b' be reshared.' ) postupgrademessage = _( b'repository upgraded to share safe mode, existing' b' shares will still work in old non-safe mode. ' b'Re-share existing shares to use them in safe mode' b' New shares will be created in safe mode.' ) # upgrade only needs to change the requirements touches_filelogs = False touches_manifests = False touches_changelog = False touches_requirements = True @registerformatvariant class sparserevlog(requirementformatvariant): name = b'sparserevlog' _requirement = requirements.SPARSEREVLOG_REQUIREMENT default = True description = _( b'in order to limit disk reading and memory usage on older ' b'version, the span of a delta chain from its root to its ' b'end is limited, whatever the relevant data in this span. 
'
        b'This can severely limit the ability of Mercurial to build good '
        b'chains of deltas, resulting in much more storage space being '
        b'taken and limiting the reusability of on-disk deltas during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog will support delta chains with more unused data '
        b'between payloads. These gaps will be skipped at read '
        b'time. This allows for better delta chains, yielding '
        b'better compression and faster exchange with the server.'
    )


@registerformatvariant
class sidedata(requirementformatvariant):
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')


@registerformatvariant
class persistentnodemap(requirementformatvariant):
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speed up lookup'
    )

    upgrademessage = _(b'Speed up revision lookup by node id.')


@registerformatvariant
class copiessdc(requirementformatvariant):
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows using a more efficient algorithm to deal with '
        b'copy tracing.'
    )


@registerformatvariant
class removecldeltachain(formatvariant):
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformatted to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        return True


@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compression algorithm used to compress data. '
        b'Some engines are faster than others.'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        compression = b'zlib'
        for req in repo.requirements:
            prefix = req.startswith
            if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
                compression = req.split(b'-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for comp in compengines:
            if comp in util.compengines:
                return comp
        # no valid compression engine was found; display them all for clarity
        return b','.join(compengines)


@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def fromrepo(cls, repo):
        comp = compressionengine.fromrepo(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)

    @classmethod
    def fromconfig(cls, repo):
        comp = compressionengine.fromconfig(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)


def find_format_upgrades(repo):
    """returns a list of format upgrades which can be performed on the repo"""
    upgrades = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    for fv in allformatvariant:
        if not fv.fromrepo(repo):
            upgrades.append(fv)

    return upgrades


def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the
    repo because their config option is disabled"""

    downgrades = []

    for fv in allformatvariant:
        if fv.name == b'compression':
            # If there is a compression change between the repository and the
            # config, the destination repository's compression will change and
            # the current compression will be removed.
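            # (Illustrative example only: a repository whose requirements
            # select zstd while the config now asks for zlib is reported as a
            # change of the 'compression' variant rather than a plain removal;
            # the engine names here are not taken from any real repository.)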
if fv.fromrepo(repo) != fv.fromconfig(repo): downgrades.append(fv) continue # format variant exist in repo but does not exist in new repository # config if fv.fromrepo(repo) and not fv.fromconfig(repo): downgrades.append(fv) return downgrades ALL_OPTIMISATIONS = [] def register_optimization(obj): ALL_OPTIMISATIONS.append(obj) return obj register_optimization( improvement( name=b're-delta-parent', type=OPTIMISATION, description=_( b'deltas within internal storage will be recalculated to ' b'choose an optimal base revision where this was not ' b'already done; the size of the repository may shrink and ' b'various operations may become faster; the first time ' b'this optimization is performed could slow down upgrade ' b'execution considerably; subsequent invocations should ' b'not run noticeably slower' ), upgrademessage=_( b'deltas within internal storage will choose a new ' b'base revision if needed' ), ) ) register_optimization( improvement( name=b're-delta-multibase', type=OPTIMISATION, description=_( b'deltas within internal storage will be recalculated ' b'against multiple base revision and the smallest ' b'difference will be used; the size of the repository may ' b'shrink significantly when there are many merges; this ' b'optimization will slow down execution in proportion to ' b'the number of merges in the repository and the amount ' b'of files in the repository; this slow down should not ' b'be significant unless there are tens of thousands of ' b'files and thousands of merges' ), upgrademessage=_( b'deltas within internal storage will choose an ' b'optimal delta by computing deltas against multiple ' b'parents; may slow down execution time ' b'significantly' ), ) ) register_optimization( improvement( name=b're-delta-all', type=OPTIMISATION, description=_( b'deltas within internal storage will always be ' b'recalculated without reusing prior deltas; this will ' b'likely make execution run several times slower; this ' b'optimization is typically not needed' ), upgrademessage=_( b'deltas within internal storage will be fully ' b'recomputed; this will likely drastically slow down ' b'execution time' ), ) ) register_optimization( improvement( name=b're-delta-fulladd', type=OPTIMISATION, description=_( b'every revision will be re-added as if it was new ' b'content. It will go through the full storage ' b'mechanism giving extensions a chance to process it ' b'(eg. lfs). This is similar to "re-delta-all" but even ' b'slower since more logic is involved.' ), upgrademessage=_( b'each revision will be added as new content to the ' b'internal storage; this will likely drastically slow ' b'down execution time, but some extensions might need ' b'it' ), ) ) def findoptimizations(repo): """Determine optimisation that could be used during upgrade""" # These are unconditionally added. There is logic later that figures out # which ones to apply. return list(ALL_OPTIMISATIONS) def determine_upgrade_actions( repo, format_upgrades, optimizations, sourcereqs, destreqs ): """Determine upgrade actions that will be performed. Given a list of improvements as returned by ``find_format_upgrades`` and ``findoptimizations``, determine the list of upgrade actions that will be performed. The role of this function is to filter improvements if needed, apply recommended optimizations from the improvements list that make sense, etc. Returns a list of action names. 
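
    A minimal sketch of the expected calling convention (illustrative only;
    ``new_reqs`` stands for the requirement set computed for the destination
    repository and is not defined in this module):

        format_upgrades = find_format_upgrades(repo)
        optimizations = findoptimizations(repo)
        actions = determine_upgrade_actions(
            repo, format_upgrades, optimizations, repo.requirements, new_reqs
        )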
""" newactions = [] for d in format_upgrades: name = d._requirement # If the action is a requirement that doesn't show up in the # destination requirements, prune the action. if name is not None and name not in destreqs: continue newactions.append(d) newactions.extend(o for o in sorted(optimizations) if o not in newactions) # FUTURE consider adding some optimizations here for certain transitions. # e.g. adding generaldelta could schedule parent redeltas. return newactions class UpgradeOperation(object): """represent the work to be done during an upgrade""" def __init__( self, ui, new_requirements, current_requirements, upgrade_actions, removed_actions, revlogs_to_process, backup_store, ): self.ui = ui self.new_requirements = new_requirements self.current_requirements = current_requirements # list of upgrade actions the operation will perform self.upgrade_actions = upgrade_actions self._upgrade_actions_names = set([a.name for a in upgrade_actions]) self.removed_actions = removed_actions self.revlogs_to_process = revlogs_to_process # requirements which will be added by the operation self._added_requirements = ( self.new_requirements - self.current_requirements ) # requirements which will be removed by the operation self._removed_requirements = ( self.current_requirements - self.new_requirements ) # requirements which will be preserved by the operation self._preserved_requirements = ( self.current_requirements & self.new_requirements ) # optimizations which are not used and it's recommended that they # should use them all_optimizations = findoptimizations(None) self.unused_optimizations = [ i for i in all_optimizations if i not in self.upgrade_actions ] # delta reuse mode of this upgrade operation self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS if b're-delta-all' in self._upgrade_actions_names: self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER elif b're-delta-parent' in self._upgrade_actions_names: self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS elif b're-delta-multibase' in self._upgrade_actions_names: self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS elif b're-delta-fulladd' in self._upgrade_actions_names: self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD # should this operation force re-delta of both parents self.force_re_delta_both_parents = ( b're-delta-multibase' in self._upgrade_actions_names ) # should this operation create a backup of the store self.backup_store = backup_store # whether the operation touches different revlogs at all or not self.touches_filelogs = self._touches_filelogs() self.touches_manifests = self._touches_manifests() self.touches_changelog = self._touches_changelog() # whether the operation touches requirements file or not self.touches_requirements = self._touches_requirements() self.touches_store = ( self.touches_filelogs or self.touches_manifests or self.touches_changelog ) # does the operation only touches repository requirement self.requirements_only = ( self.touches_requirements and not self.touches_store ) def _touches_filelogs(self): for a in self.upgrade_actions: # in optimisations, we re-process the revlogs again if a.type == OPTIMISATION: return True elif a.touches_filelogs: return True for a in self.removed_actions: if a.touches_filelogs: return True return False def _touches_manifests(self): for a in self.upgrade_actions: # in optimisations, we re-process the revlogs again if a.type == OPTIMISATION: return True elif a.touches_manifests: return True for a in self.removed_actions: if a.touches_manifests: return True 
        return False

    def _touches_changelog(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_changelog:
                return True
        for a in self.removed_actions:
            if a.touches_changelog:
                return True
        return False

    def _touches_requirements(self):
        for a in self.upgrade_actions:
            # optimisations are used to re-process revlogs and do not result
            # in a requirement being added or removed
            if a.type == OPTIMISATION:
                pass
            elif a.touches_requirements:
                return True
        for a in self.removed_actions:
            if a.touches_requirements:
                return True

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b'   preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b'   removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b'   added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b'  - %s\n' % r))
            self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """Check whether the upgrade operation will perform this action"""
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """print post upgrade operation warning messages"""
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)


### Code checking if a repository can go through the upgrade process at all. #


def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        requirements.REVLOGV1_REQUIREMENT,
        # Introduced in Mercurial 0.9.2.
        b'store',
    }


def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by
        # default.
# It should (hopefully) not exist in the wild. b'parentdelta', # Upgrade should operate on the actual store, not the shared link. requirements.SHARED_REQUIREMENT, } def check_source_requirements(repo): """Ensure that no existing requirements prevent the repository upgrade""" required = requiredsourcerequirements(repo) missingreqs = required - repo.requirements if missingreqs: msg = _(b'cannot upgrade repository; requirement missing: %s') missingreqs = b', '.join(sorted(missingreqs)) raise error.Abort(msg % missingreqs) blocking = blocksourcerequirements(repo) blockingreqs = blocking & repo.requirements if blockingreqs: m = _(b'cannot upgrade repository; unsupported source requirement: %s') blockingreqs = b', '.join(sorted(blockingreqs)) raise error.Abort(m % blockingreqs) ### Verify the validity of the planned requirement changes #################### def supportremovedrequirements(repo): """Obtain requirements that can be removed during an upgrade. If an upgrade were to create a repository that dropped a requirement, the dropped requirement must appear in the returned set for the upgrade to be allowed. """ supported = { requirements.SPARSEREVLOG_REQUIREMENT, requirements.SIDEDATA_REQUIREMENT, requirements.COPIESSDC_REQUIREMENT, requirements.NODEMAP_REQUIREMENT, requirements.SHARESAFE_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] if engine.available() and engine.revlogheader(): supported.add(b'exp-compression-%s' % name) if engine.name() == b'zstd': supported.add(b'revlog-compression-zstd') return supported def supporteddestrequirements(repo): """Obtain requirements that upgrade supports in the destination. If the result of the upgrade would create requirements not in this set, the upgrade is disallowed. Extensions should monkeypatch this to add their custom requirements. """ supported = { - b'dotencode', + requirements.DOTENCODE_REQUIREMENT, b'fncache', requirements.GENERALDELTA_REQUIREMENT, requirements.REVLOGV1_REQUIREMENT, b'store', requirements.SPARSEREVLOG_REQUIREMENT, requirements.SIDEDATA_REQUIREMENT, requirements.COPIESSDC_REQUIREMENT, requirements.NODEMAP_REQUIREMENT, requirements.SHARESAFE_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] if engine.available() and engine.revlogheader(): supported.add(b'exp-compression-%s' % name) if engine.name() == b'zstd': supported.add(b'revlog-compression-zstd') return supported def allowednewrequirements(repo): """Obtain requirements that can be added to a repository during upgrade. This is used to disallow proposed requirements from being added when they weren't present before. We use a list of allowed requirement additions instead of a list of known bad additions because the whitelist approach is safer and will prevent future, unknown requirements from accidentally being added. 
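
    A third-party extension that introduces its own requirement could widen
    this whitelist by wrapping the function from its setup code; a rough
    sketch follows (the requirement name, wrapper, and module alias are
    hypothetical, and ``extensions`` / ``upgrade_actions`` would be imported
    by the extension itself):

        def _allowednew(orig, repo):
            allowed = orig(repo)
            allowed.add(b'exp-myextension-storage')
            return allowed

        extensions.wrapfunction(
            upgrade_actions, 'allowednewrequirements', _allowednew
        )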
""" supported = { - b'dotencode', + requirements.DOTENCODE_REQUIREMENT, b'fncache', requirements.GENERALDELTA_REQUIREMENT, requirements.SPARSEREVLOG_REQUIREMENT, requirements.SIDEDATA_REQUIREMENT, requirements.COPIESSDC_REQUIREMENT, requirements.NODEMAP_REQUIREMENT, requirements.SHARESAFE_REQUIREMENT, } for name in compression.compengines: engine = compression.compengines[name] if engine.available() and engine.revlogheader(): supported.add(b'exp-compression-%s' % name) if engine.name() == b'zstd': supported.add(b'revlog-compression-zstd') return supported def check_requirements_changes(repo, new_reqs): old_reqs = repo.requirements support_removal = supportremovedrequirements(repo) no_remove_reqs = old_reqs - new_reqs - support_removal if no_remove_reqs: msg = _(b'cannot upgrade repository; requirement would be removed: %s') no_remove_reqs = b', '.join(sorted(no_remove_reqs)) raise error.Abort(msg % no_remove_reqs) support_addition = allowednewrequirements(repo) no_add_reqs = new_reqs - old_reqs - support_addition if no_add_reqs: m = _(b'cannot upgrade repository; do not support adding requirement: ') no_add_reqs = b', '.join(sorted(no_add_reqs)) raise error.Abort(m + no_add_reqs) supported = supporteddestrequirements(repo) unsupported_reqs = new_reqs - supported if unsupported_reqs: msg = _( b'cannot upgrade repository; do not support destination ' b'requirement: %s' ) unsupported_reqs = b', '.join(sorted(unsupported_reqs)) raise error.Abort(msg % unsupported_reqs)