diff --git a/hgext/narrow/__init__.py b/hgext/narrow/__init__.py --- a/hgext/narrow/__init__.py +++ b/hgext/narrow/__init__.py @@ -1,93 +1,93 @@ # __init__.py - narrowhg extension # # Copyright 2017 Google, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. '''create clones which fetch history data for subset of files (EXPERIMENTAL)''' from __future__ import absolute_import # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or # leave the attribute unspecified. testedwith = 'ships-with-hg-core' from mercurial import ( extensions, hg, localrepo, registrar, verify as verifymod, ) from . import ( narrowbundle2, narrowchangegroup, narrowcommands, narrowcopies, narrowdirstate, narrowmerge, narrowpatch, narrowrepo, narrowrevlog, narrowtemplates, narrowwirepeer, ) configtable = {} configitem = registrar.configitem(configtable) # Narrowhg *has* support for serving ellipsis nodes (which are used at # least by Google's internal server), but that support is pretty # fragile and has a lot of problems on real-world repositories that # have complex graph topologies. This could probably be corrected, but # absent someone needing the full support for ellipsis nodes in # repositories with merges, it's unlikely this work will get done. As # of this writining in late 2017, all repositories large enough for # ellipsis nodes to be a hard requirement also enforce strictly linear # history for other scaling reasons. configitem('experimental', 'narrowservebrokenellipses', default=False, alias=[('narrow', 'serveellipses')], ) # Export the commands table for Mercurial to see. 
cmdtable = narrowcommands.table -localrepo.localrepository._basesupported.add(narrowrepo.requirement) +localrepo.localrepository._basesupported.add(narrowrepo.REQUIREMENT) def uisetup(ui): """Wraps user-facing mercurial commands with narrow-aware versions.""" narrowrevlog.setup() narrowbundle2.setup() narrowmerge.setup() narrowtemplates.setup() narrowcommands.setup() narrowchangegroup.setup() narrowwirepeer.uisetup() def reposetup(ui, repo): """Wraps local repositories with narrow repo support.""" if not isinstance(repo, localrepo.localrepository): return - if narrowrepo.requirement in repo.requirements: + if narrowrepo.REQUIREMENT in repo.requirements: narrowrepo.wraprepo(repo, True) narrowcopies.setup(repo) narrowdirstate.setup(repo) narrowpatch.setup(repo) narrowwirepeer.reposetup(repo) def _verifierinit(orig, self, repo, matcher=None): # The verifier's matcher argument was desgined for narrowhg, so it should # be None from core. If another extension passes a matcher (unlikely), # we'll have to fail until matchers can be composed more easily. assert matcher is None matcher = getattr(repo, 'narrowmatch', lambda: None)() orig(self, repo, matcher) def extsetup(ui): extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit) extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare) extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec) diff --git a/hgext/narrow/narrowbundle2.py b/hgext/narrow/narrowbundle2.py --- a/hgext/narrow/narrowbundle2.py +++ b/hgext/narrow/narrowbundle2.py @@ -1,496 +1,496 @@ # narrowbundle2.py - bundle2 extensions for narrow repository support # # Copyright 2017 Google, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. 
from __future__ import absolute_import import collections import errno import struct from mercurial.i18n import _ from mercurial.node import ( bin, nullid, nullrev, ) from mercurial import ( bundle2, changegroup, dagutil, error, exchange, extensions, repair, util, wireproto, ) from . import ( narrowrepo, narrowspec, ) NARROWCAP = 'narrow' _NARROWACL_SECTION = 'narrowhgacl' _CHANGESPECPART = NARROWCAP + ':changespec' _SPECPART = NARROWCAP + ':spec' _SPECPART_INCLUDE = 'include' _SPECPART_EXCLUDE = 'exclude' _KILLNODESIGNAL = 'KILL' _DONESIGNAL = 'DONE' _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text) _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text) _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) # When advertising capabilities, always include narrow clone support. def getrepocaps_narrow(orig, repo, **kwargs): caps = orig(repo, **kwargs) caps[NARROWCAP] = ['v0'] return caps def _computeellipsis(repo, common, heads, known, match, depth=None): """Compute the shape of a narrowed DAG. Args: repo: The repository we're transferring. common: The roots of the DAG range we're transferring. May be just [nullid], which means all ancestors of heads. heads: The heads of the DAG range we're transferring. match: The narrowmatcher that allows us to identify relevant changes. depth: If not None, only consider nodes to be full nodes if they are at most depth changesets away from one of heads. Returns: A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: visitnodes: The list of nodes (either full or ellipsis) which need to be sent to the client. relevant_nodes: The set of changelog nodes which change a file inside the narrowspec. The client needs these as non-ellipsis nodes. ellipsisroots: A dict of {rev: parents} that is used in narrowchangegroup to produce ellipsis nodes with the correct parents. 
""" cl = repo.changelog mfl = repo.manifestlog cldag = dagutil.revlogdag(cl) # dagutil does not like nullid/nullrev commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev]) headsrevs = cldag.internalizeall(heads) if depth: revdepth = {h: 0 for h in headsrevs} ellipsisheads = collections.defaultdict(set) ellipsisroots = collections.defaultdict(set) def addroot(head, curchange): """Add a root to an ellipsis head, splitting heads with 3 roots.""" ellipsisroots[head].add(curchange) # Recursively split ellipsis heads with 3 roots by finding the # roots' youngest common descendant which is an elided merge commit. # That descendant takes 2 of the 3 roots as its own, and becomes a # root of the head. while len(ellipsisroots[head]) > 2: child, roots = splithead(head) splitroots(head, child, roots) head = child # Recurse in case we just added a 3rd root def splitroots(head, child, roots): ellipsisroots[head].difference_update(roots) ellipsisroots[head].add(child) ellipsisroots[child].update(roots) ellipsisroots[child].discard(child) def splithead(head): r1, r2, r3 = sorted(ellipsisroots[head]) for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head) for j in mid: if j == nr2: return nr2, (nr1, nr2) if j not in ellipsisroots or len(ellipsisroots[j]) < 2: return j, (nr1, nr2) raise error.Abort('Failed to split up ellipsis node! 
head: %d, ' 'roots: %d %d %d' % (head, r1, r2, r3)) missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) visit = reversed(missing) relevant_nodes = set() visitnodes = map(cl.node, missing) required = set(headsrevs) | known for rev in visit: clrev = cl.changelogrevision(rev) ps = cldag.parents(rev) if depth is not None: curdepth = revdepth[rev] for p in ps: revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) needed = False shallow_enough = depth is None or revdepth[rev] <= depth if shallow_enough: curmf = mfl[clrev.manifest].read() if ps: # We choose to not trust the changed files list in # changesets because it's not always correct. TODO: could # we trust it for the non-merge case? p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() needed = any(match(f) for f in curmf.diff(p1mf).iterkeys()) if not needed and len(ps) > 1: # For merge changes, the list of changed files is not # helpful, since we need to emit the merge if a file # in the narrow spec has changed on either side of the # merge. As a result, we do a manifest diff to check. p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() needed = any(match(f) for f in curmf.diff(p2mf).iterkeys()) else: # For a root node, we need to include the node if any # files in the node match the narrowspec. 
needed = any(match(f) for f in curmf) if needed: for head in ellipsisheads[rev]: addroot(head, rev) for p in ps: required.add(p) relevant_nodes.add(cl.node(rev)) else: if not ps: ps = [nullrev] if rev in required: for head in ellipsisheads[rev]: addroot(head, rev) for p in ps: ellipsisheads[p].add(rev) else: for p in ps: ellipsisheads[p] |= ellipsisheads[rev] # add common changesets as roots of their reachable ellipsis heads for c in commonrevs: for head in ellipsisheads[c]: addroot(head, c) return visitnodes, relevant_nodes, ellipsisroots def _packellipsischangegroup(repo, common, match, relevant_nodes, ellipsisroots, visitnodes, depth, source, version): if version in ('01', '02'): raise error.Abort( 'ellipsis nodes require at least cg3 on client and server, ' 'but negotiated version %s' % version) # We wrap cg1packer.revchunk, using a side channel to pass # relevant_nodes into that area. Then if linknode isn't in the # set, we know we have an ellipsis node and we should defer # sending that node's data. We override close() to detect # pending ellipsis nodes and flush them. packer = changegroup.getbundler(version, repo) # Let the packer have access to the narrow matcher so it can # omit filelogs and dirlogs as needed packer._narrow_matcher = lambda : match # Give the packer the list of nodes which should not be # ellipsis nodes. We store this rather than the set of nodes # that should be an ellipsis because for very large histories # we expect this to be significantly smaller. packer.full_nodes = relevant_nodes # Maps ellipsis revs to their roots at the changelog level. packer.precomputed_ellipsis = ellipsisroots # Maps CL revs to per-revlog revisions. Cleared in close() at # the end of each group. packer.clrev_to_localrev = {} packer.next_clrev_to_localrev = {} # Maps changelog nodes to changelog revs. Filled in once # during changelog stage and then left unmodified. 
packer.clnode_to_rev = {} packer.changelog_done = False # If true, informs the packer that it is serving shallow content and might # need to pack file contents not introduced by the changes being packed. packer.is_shallow = depth is not None return packer.generate(common, visitnodes, False, source) # Serve a changegroup for a client with a narrow clone. def getbundlechangegrouppart_narrow(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, common=None, **kwargs): cgversions = b2caps.get('changegroup') getcgkwargs = {} if cgversions: # 3.1 and 3.2 ship with an empty value cgversions = [v for v in cgversions if v in changegroup.supportedoutgoingversions(repo)] if not cgversions: raise ValueError(_('no common changegroup version')) version = getcgkwargs['version'] = max(cgversions) else: raise ValueError(_("server does not advertise changegroup version," " can't negotiate support for ellipsis nodes")) include = sorted(filter(bool, kwargs.get('includepats', []))) exclude = sorted(filter(bool, kwargs.get('excludepats', []))) newmatch = narrowspec.match(repo.root, include=include, exclude=exclude) if not repo.ui.configbool("experimental", "narrowservebrokenellipses"): outgoing = exchange._computeoutgoing(repo, heads, common) if not outgoing.missing: return def wrappedgetbundler(orig, *args, **kwargs): bundler = orig(*args, **kwargs) bundler._narrow_matcher = lambda : newmatch return bundler with extensions.wrappedfunction(changegroup, 'getbundler', wrappedgetbundler): cg = changegroup.makestream(repo, outgoing, version, source) part = bundler.newpart('changegroup', data=cg) part.addparam('version', version) if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') if include or exclude: narrowspecpart = bundler.newpart(_SPECPART) if include: narrowspecpart.addparam( _SPECPART_INCLUDE, '\n'.join(include), mandatory=True) if exclude: narrowspecpart.addparam( _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True) return depth = 
kwargs.get('depth', None) if depth is not None: depth = int(depth) if depth < 1: raise error.Abort(_('depth must be positive, got %d') % depth) heads = set(heads or repo.heads()) common = set(common or [nullid]) oldinclude = sorted(filter(bool, kwargs.get('oldincludepats', []))) oldexclude = sorted(filter(bool, kwargs.get('oldexcludepats', []))) known = {bin(n) for n in kwargs.get('known', [])} if known and (oldinclude != include or oldexclude != exclude): # Steps: # 1. Send kill for "$known & ::common" # # 2. Send changegroup for ::common # # 3. Proceed. # # In the future, we can send kills for only the specific # nodes we know should go away or change shape, and then # send a data stream that tells the client something like this: # # a) apply this changegroup # b) apply nodes XXX, YYY, ZZZ that you already have # c) goto a # # until they've built up the full new state. # Convert to revnums and intersect with "common". The client should # have made it a subset of "common" already, but let's be safe. known = set(repo.revs("%ln & ::%ln", known, common)) # TODO: we could send only roots() of this set, and the # list of nodes in common, and the client could work out # what to strip, instead of us explicitly sending every # single node. 
deadrevs = known def genkills(): for r in deadrevs: yield _KILLNODESIGNAL yield repo.changelog.node(r) yield _DONESIGNAL bundler.newpart(_CHANGESPECPART, data=genkills()) newvisit, newfull, newellipsis = _computeellipsis( repo, set(), common, known, newmatch) if newvisit: cg = _packellipsischangegroup( repo, common, newmatch, newfull, newellipsis, newvisit, depth, source, version) part = bundler.newpart('changegroup', data=cg) part.addparam('version', version) if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') visitnodes, relevant_nodes, ellipsisroots = _computeellipsis( repo, common, heads, set(), newmatch, depth=depth) repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes)) if visitnodes: cg = _packellipsischangegroup( repo, common, newmatch, relevant_nodes, ellipsisroots, visitnodes, depth, source, version) part = bundler.newpart('changegroup', data=cg) part.addparam('version', version) if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') def applyacl_narrow(repo, kwargs): username = repo.ui.shortuser(repo.ui.username()) user_includes = repo.ui.configlist( _NARROWACL_SECTION, username + '.includes', repo.ui.configlist(_NARROWACL_SECTION, 'default.includes')) user_excludes = repo.ui.configlist( _NARROWACL_SECTION, username + '.excludes', repo.ui.configlist(_NARROWACL_SECTION, 'default.excludes')) if not user_includes: raise error.Abort(_("{} configuration for user {} is empty") .format(_NARROWACL_SECTION, username)) user_includes = [ 'path:.' if p == '*' else 'path:' + p for p in user_includes] user_excludes = [ 'path:.' 
if p == '*' else 'path:' + p for p in user_excludes] req_includes = set(kwargs.get('includepats', [])) req_excludes = set(kwargs.get('excludepats', [])) invalid_includes = [] req_includes, req_excludes = narrowspec.restrictpatterns( req_includes, req_excludes, user_includes, user_excludes, invalid_includes) if invalid_includes: raise error.Abort( _("The following includes are not accessible for {}: {}") .format(username, invalid_includes)) new_args = {} new_args.update(kwargs) new_args['includepats'] = req_includes if req_excludes: new_args['excludepats'] = req_excludes return new_args @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE)) def _handlechangespec_2(op, inpart): includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines()) excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines()) narrowspec.save(op.repo, includepats, excludepats) - if not narrowrepo.requirement in op.repo.requirements: - op.repo.requirements.add(narrowrepo.requirement) + if not narrowrepo.REQUIREMENT in op.repo.requirements: + op.repo.requirements.add(narrowrepo.REQUIREMENT) op.repo._writerequirements() op.repo.invalidate(clearfilecache=True) @bundle2.parthandler(_CHANGESPECPART) def _handlechangespec(op, inpart): repo = op.repo cl = repo.changelog # changesets which need to be stripped entirely. either they're no longer # needed in the new narrow spec, or the server is sending a replacement # in the changegroup part. clkills = set() # A changespec part contains all the updates to ellipsis nodes # that will happen as a result of widening or narrowing a # repo. All the changes that this block encounters are ellipsis # nodes or flags to kill an existing ellipsis. 
chunksignal = changegroup.readexactly(inpart, 4) while chunksignal != _DONESIGNAL: if chunksignal == _KILLNODESIGNAL: # a node used to be an ellipsis but isn't anymore ck = changegroup.readexactly(inpart, 20) if cl.hasnode(ck): clkills.add(ck) else: raise error.Abort( _('unexpected changespec node chunk type: %s') % chunksignal) chunksignal = changegroup.readexactly(inpart, 4) if clkills: # preserve bookmarks that repair.strip() would otherwise strip bmstore = repo._bookmarks class dummybmstore(dict): def applychanges(self, repo, tr, changes): pass def recordchange(self, tr): # legacy version pass repo._bookmarks = dummybmstore() chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True, topic='widen') repo._bookmarks = bmstore if chgrpfile: # presence of _widen_bundle attribute activates widen handler later op._widen_bundle = chgrpfile # Set the new narrowspec if we're widening. The setnewnarrowpats() method # will currently always be there when using the core+narrowhg server, but # other servers may include a changespec part even when not widening (e.g. # because we're deepening a shallow repo). if util.safehasattr(repo, 'setnewnarrowpats'): repo.setnewnarrowpats() def handlechangegroup_widen(op, inpart): """Changegroup exchange handler which restores temporarily-stripped nodes""" # We saved a bundle with stripped node data we must now restore. # This approach is based on mercurial/repair.py@6ee26a53c111. 
repo = op.repo ui = op.ui chgrpfile = op._widen_bundle del op._widen_bundle vfs = repo.vfs ui.note(_("adding branch\n")) f = vfs.open(chgrpfile, "rb") try: gen = exchange.readbundle(ui, f, chgrpfile, vfs) if not ui.verbose: # silence internal shuffling chatter ui.pushbuffer() if isinstance(gen, bundle2.unbundle20): with repo.transaction('strip') as tr: bundle2.processbundle(repo, gen, lambda: tr) else: gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True) if not ui.verbose: ui.popbuffer() finally: f.close() # remove undo files for undovfs, undofile in repo.undofiles(): try: undovfs.unlink(undofile) except OSError as e: if e.errno != errno.ENOENT: ui.warn(_('error removing %s: %s\n') % (undovfs.join(undofile), str(e))) # Remove partial backup only if there were no exceptions vfs.unlink(chgrpfile) def setup(): """Enable narrow repo support in bundle2-related extension points.""" extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow) wireproto.gboptsmap['narrow'] = 'boolean' wireproto.gboptsmap['depth'] = 'plain' wireproto.gboptsmap['oldincludepats'] = 'csv' wireproto.gboptsmap['oldexcludepats'] = 'csv' wireproto.gboptsmap['includepats'] = 'csv' wireproto.gboptsmap['excludepats'] = 'csv' wireproto.gboptsmap['known'] = 'csv' # Extend changegroup serving to handle requests from narrow clients. origcgfn = exchange.getbundle2partsmapping['changegroup'] def wrappedcgfn(*args, **kwargs): repo = args[1] if repo.ui.has_section(_NARROWACL_SECTION): getbundlechangegrouppart_narrow( *args, **applyacl_narrow(repo, kwargs)) elif kwargs.get('narrow', False): getbundlechangegrouppart_narrow(*args, **kwargs) else: origcgfn(*args, **kwargs) exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn # Extend changegroup receiver so client can fixup after widen requests. 
origcghandler = bundle2.parthandlermapping['changegroup'] def wrappedcghandler(op, inpart): origcghandler(op, inpart) if util.safehasattr(op, '_widen_bundle'): handlechangegroup_widen(op, inpart) wrappedcghandler.params = origcghandler.params bundle2.parthandlermapping['changegroup'] = wrappedcghandler diff --git a/hgext/narrow/narrowchangegroup.py b/hgext/narrow/narrowchangegroup.py --- a/hgext/narrow/narrowchangegroup.py +++ b/hgext/narrow/narrowchangegroup.py @@ -1,385 +1,385 @@ # narrowchangegroup.py - narrow clone changegroup creation and consumption # # Copyright 2017 Google, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import from mercurial.i18n import _ from mercurial import ( changegroup, error, extensions, manifest, mdiff, node, util, ) from . import ( narrowrepo, narrowrevlog, ) def setup(): def supportedoutgoingversions(orig, repo): versions = orig(repo) - if narrowrepo.requirement in repo.requirements: + if narrowrepo.REQUIREMENT in repo.requirements: versions.discard('01') versions.discard('02') return versions extensions.wrapfunction(changegroup, 'supportedoutgoingversions', supportedoutgoingversions) def prune(orig, self, revlog, missing, commonrevs): if isinstance(revlog, manifest.manifestrevlog): matcher = getattr(self._repo, 'narrowmatch', getattr(self, '_narrow_matcher', None)) if (matcher is not None and not matcher().visitdir(revlog._dir[:-1] or '.')): return [] return orig(self, revlog, missing, commonrevs) extensions.wrapfunction(changegroup.cg1packer, 'prune', prune) def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source): matcher = getattr(self._repo, 'narrowmatch', getattr(self, '_narrow_matcher', None)) if matcher is not None: narrowmatch = matcher() changedfiles = filter(narrowmatch, changedfiles) if getattr(self, 'is_shallow', False): # See comment in generate() for why this sadness is a 
thing. mfdicts = self._mfdicts del self._mfdicts # In a shallow clone, the linknodes callback needs to also include # those file nodes that are in the manifests we sent but weren't # introduced by those manifests. commonctxs = [self._repo[c] for c in commonrevs] oldlinknodes = linknodes clrev = self._repo.changelog.rev def linknodes(flog, fname): for c in commonctxs: try: fnode = c.filenode(fname) self.clrev_to_localrev[c.rev()] = flog.rev(fnode) except error.ManifestLookupError: pass links = oldlinknodes(flog, fname) if len(links) != len(mfdicts): for mf, lr in mfdicts: fnode = mf.get(fname, None) if fnode in links: links[fnode] = min(links[fnode], lr, key=clrev) elif fnode: links[fnode] = lr return links return orig(self, changedfiles, linknodes, commonrevs, source) extensions.wrapfunction( changegroup.cg1packer, 'generatefiles', generatefiles) def ellipsisdata(packer, rev, revlog, p1, p2, data, linknode): n = revlog.node(rev) p1n, p2n = revlog.node(p1), revlog.node(p2) flags = revlog.flags(rev) flags |= narrowrevlog.ELLIPSIS_NODE_FLAG meta = packer.builddeltaheader( n, p1n, p2n, node.nullid, linknode, flags) # TODO: try and actually send deltas for ellipsis data blocks diffheader = mdiff.trivialdiffheader(len(data)) l = len(meta) + len(diffheader) + len(data) return ''.join((changegroup.chunkheader(l), meta, diffheader, data)) def close(orig, self): getattr(self, 'clrev_to_localrev', {}).clear() if getattr(self, 'next_clrev_to_localrev', {}): self.clrev_to_localrev = self.next_clrev_to_localrev del self.next_clrev_to_localrev self.changelog_done = True return orig(self) extensions.wrapfunction(changegroup.cg1packer, 'close', close) # In a perfect world, we'd generate better ellipsis-ified graphs # for non-changelog revlogs. In practice, we haven't started doing # that yet, so the resulting DAGs for the manifestlog and filelogs # are actually full of bogus parentage on all the ellipsis # nodes. 
This has the side effect that, while the contents are # correct, the individual DAGs might be completely out of whack in # a case like 882681bc3166 and its ancestors (back about 10 # revisions or so) in the main hg repo. # # The one invariant we *know* holds is that the new (potentially # bogus) DAG shape will be valid if we order the nodes in the # order that they're introduced in dramatis personae by the # changelog, so what we do is we sort the non-changelog histories # by the order in which they are used by the changelog. def _sortgroup(orig, self, revlog, nodelist, lookup): if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev: return orig(self, revlog, nodelist, lookup) key = lambda n: self.clnode_to_rev[lookup(n)] return [revlog.rev(n) for n in sorted(nodelist, key=key)] extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup) def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source): '''yield a sequence of changegroup chunks (strings)''' # Note: other than delegating to orig, the only deviation in # logic from normal hg's generate is marked with BEGIN/END # NARROW HACK. if not util.safehasattr(self, 'full_nodes'): # not sending a narrow bundle for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source): yield x return repo = self._repo cl = repo.changelog mfl = repo.manifestlog mfrevlog = mfl._revlog clrevorder = {} mfs = {} # needed manifests fnodes = {} # needed file nodes changedfiles = set() # Callback for the changelog, used to collect changed files and manifest # nodes. # Returns the linkrev node (identity in the changelog case). def lookupcl(x): c = cl.read(x) clrevorder[x] = len(clrevorder) # BEGIN NARROW HACK # # Only update mfs if x is going to be sent. Otherwise we # end up with bogus linkrevs specified for manifests and # we skip some manifest nodes that we should otherwise # have sent. 
if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis: n = c[0] # record the first changeset introducing this manifest version mfs.setdefault(n, x) # Set this narrow-specific dict so we have the lowest manifest # revnum to look up for this cl revnum. (Part of mapping # changelog ellipsis parents to manifest ellipsis parents) self.next_clrev_to_localrev.setdefault(cl.rev(x), mfrevlog.rev(n)) # We can't trust the changed files list in the changeset if the # client requested a shallow clone. if self.is_shallow: changedfiles.update(mfl[c[0]].read().keys()) else: changedfiles.update(c[3]) # END NARROW HACK # Record a complete list of potentially-changed files in # this manifest. return x self._verbosenote(_('uncompressed size of bundle content:\n')) size = 0 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): size += len(chunk) yield chunk self._verbosenote(_('%8.i (changelog)\n') % size) # We need to make sure that the linkrev in the changegroup refers to # the first changeset that introduced the manifest or file revision. # The fastpath is usually safer than the slowpath, because the filelogs # are walked in revlog order. # # When taking the slowpath with reorder=None and the manifest revlog # uses generaldelta, the manifest may be walked in the "wrong" order. # Without 'clrevorder', we would get an incorrect linkrev (see fix in # cc0ff93d0c0c). # # When taking the fastpath, we are only vulnerable to reordering # of the changelog itself. The changelog never uses generaldelta, so # it is only reordered when reorder=True. To handle this case, we # simply take the slowpath, which already has the 'clrevorder' logic. # This was also fixed in cc0ff93d0c0c. fastpathlinkrev = fastpathlinkrev and not self._reorder # Treemanifests don't work correctly with fastpathlinkrev # either, because we don't discover which directory nodes to # send along with files. This could probably be fixed. 
fastpathlinkrev = fastpathlinkrev and ( 'treemanifest' not in repo.requirements) # Shallow clones also don't work correctly with fastpathlinkrev # because file nodes may need to be sent for a manifest even if they # weren't introduced by that manifest. fastpathlinkrev = fastpathlinkrev and not self.is_shallow moreargs = [] if self.generatemanifests.func_code.co_argcount == 7: # The source argument was added to generatemanifests in hg in # 75cc1f1e11f2 (2017/09/11). moreargs.append(source) for chunk in self.generatemanifests(commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, *moreargs): yield chunk # BEGIN NARROW HACK mfdicts = None if self.is_shallow: mfdicts = [(self._repo.manifestlog[n].read(), lr) for (n, lr) in mfs.iteritems()] # END NARROW HACK mfs.clear() clrevs = set(cl.rev(x) for x in clnodes) if not fastpathlinkrev: def linknodes(unused, fname): return fnodes.get(fname, {}) else: cln = cl.node def linknodes(filerevlog, fname): llr = filerevlog.linkrev fln = filerevlog.node revs = ((r, llr(r)) for r in filerevlog) return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) # BEGIN NARROW HACK # # We need to pass the mfdicts variable down into # generatefiles(), but more than one command might have # wrapped generatefiles so we can't modify the function # signature. Instead, we pass the data to ourselves using an # instance attribute. I'm sorry. self._mfdicts = mfdicts # END NARROW HACK for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, source): yield chunk yield self.close() if clnodes: repo.hook('outgoing', node=node.hex(clnodes[0]), source=source) extensions.wrapfunction(changegroup.cg1packer, 'generate', generate) def revchunk(orig, self, revlog, rev, prev, linknode): if not util.safehasattr(self, 'full_nodes'): # not sending a narrow changegroup for x in orig(self, revlog, rev, prev, linknode): yield x return # build up some mapping information that's useful later. See # the local() nested function below. 
if not self.changelog_done: self.clnode_to_rev[linknode] = rev linkrev = rev self.clrev_to_localrev[linkrev] = rev else: linkrev = self.clnode_to_rev[linknode] self.clrev_to_localrev[linkrev] = rev # This is a node to send in full, because the changeset it # corresponds to was a full changeset. if linknode in self.full_nodes: for x in orig(self, revlog, rev, prev, linknode): yield x return # At this point, a node can either be one we should skip or an # ellipsis. If it's not an ellipsis, bail immediately. if linkrev not in self.precomputed_ellipsis: return linkparents = self.precomputed_ellipsis[linkrev] def local(clrev): """Turn a changelog revnum into a local revnum. The ellipsis dag is stored as revnums on the changelog, but when we're producing ellipsis entries for non-changelog revlogs, we need to turn those numbers into something local. This does that for us, and during the changelog sending phase will also expand the stored mappings as needed. """ if clrev == node.nullrev: return node.nullrev if not self.changelog_done: # If we're doing the changelog, it's possible that we # have a parent that is already on the client, and we # need to store some extra mapping information so that # our contained ellipsis nodes will be able to resolve # their parents. if clrev not in self.clrev_to_localrev: clnode = revlog.node(clrev) self.clnode_to_rev[clnode] = clrev return clrev # Walk the ellipsis-ized changelog breadth-first looking for a # change that has been linked from the current revlog. # # For a flat manifest revlog only a single step should be necessary # as all relevant changelog entries are relevant to the flat # manifest. # # For a filelog or tree manifest dirlog however not every changelog # entry will have been relevant, so we need to skip some changelog # nodes even after ellipsis-izing. 
walk = [clrev] while walk: p = walk[0] walk = walk[1:] if p in self.clrev_to_localrev: return self.clrev_to_localrev[p] elif p in self.full_nodes: walk.extend([pp for pp in self._repo.changelog.parentrevs(p) if pp != node.nullrev]) elif p in self.precomputed_ellipsis: walk.extend([pp for pp in self.precomputed_ellipsis[p] if pp != node.nullrev]) else: # In this case, we've got an ellipsis with parents # outside the current bundle (likely an # incremental pull). We "know" that we can use the # value of this same revlog at whatever revision # is pointed to by linknode. "Know" is in scare # quotes because I haven't done enough examination # of edge cases to convince myself this is really # a fact - it works for all the (admittedly # thorough) cases in our testsuite, but I would be # somewhat unsurprised to find a case in the wild # where this breaks down a bit. That said, I don't # know if it would hurt anything. for i in xrange(rev, 0, -1): if revlog.linkrev(i) == clrev: return i # We failed to resolve a parent for this node, so # we crash the changegroup construction. raise error.Abort( 'unable to resolve parent while packing %r %r' ' for changeset %r' % (revlog.indexfile, rev, clrev)) return node.nullrev if not linkparents or ( revlog.parentrevs(rev) == (node.nullrev, node.nullrev)): p1, p2 = node.nullrev, node.nullrev elif len(linkparents) == 1: p1, = sorted(local(p) for p in linkparents) p2 = node.nullrev else: p1, p2 = sorted(local(p) for p in linkparents) yield ellipsisdata( self, rev, revlog, p1, p2, revlog.revision(rev), linknode) extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk) def deltaparent(orig, self, revlog, rev, p1, p2, prev): if util.safehasattr(self, 'full_nodes'): # TODO: send better deltas when in narrow mode. # # changegroup.group() loops over revisions to send, # including revisions we'll skip. 
What this means is that # `prev` will be a potentially useless delta base for all # ellipsis nodes, as the client likely won't have it. In # the future we should do bookkeeping about which nodes # have been sent to the client, and try to be # significantly smarter about delta bases. This is # slightly tricky because this same code has to work for # all revlogs, and we don't have the linkrev/linknode here. return p1 return orig(self, revlog, rev, p1, p2, prev) extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent) diff --git a/hgext/narrow/narrowcommands.py b/hgext/narrow/narrowcommands.py --- a/hgext/narrow/narrowcommands.py +++ b/hgext/narrow/narrowcommands.py @@ -1,402 +1,402 @@ # narrowcommands.py - command modifications for narrowhg extension # # Copyright 2017 Google, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import import itertools from mercurial.i18n import _ from mercurial import ( cmdutil, commands, discovery, error, exchange, extensions, hg, merge, node, registrar, repair, repoview, util, ) from . 
import ( narrowbundle2, narrowrepo, narrowspec, ) table = {} command = registrar.command(table) def setup(): """Wraps user-facing mercurial commands with narrow-aware versions.""" entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd) entry[1].append(('', 'narrow', None, _("create a narrow clone of select files"))) entry[1].append(('', 'depth', '', _("limit the history fetched by distance from heads"))) # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit if 'sparse' not in extensions.enabled(): entry[1].append(('', 'include', [], _("specifically fetch this file/directory"))) entry[1].append( ('', 'exclude', [], _("do not fetch this file/directory, even if included"))) entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd) entry[1].append(('', 'depth', '', _("limit the history fetched by distance from heads"))) extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd) def expandpull(pullop, includepats, excludepats): if not narrowspec.needsexpansion(includepats): return includepats, excludepats heads = pullop.heads or pullop.rheads includepats, excludepats = pullop.remote.expandnarrow( includepats, excludepats, heads) pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % ( includepats, excludepats)) return set(includepats), set(excludepats) def clonenarrowcmd(orig, ui, repo, *args, **opts): """Wraps clone command, so 'hg clone' first wraps localrepo.clone().""" wrappedextraprepare = util.nullcontextmanager() opts_narrow = opts['narrow'] if opts_narrow: def pullbundle2extraprepare_widen(orig, pullop, kwargs): # Create narrow spec patterns from clone flags includepats = narrowspec.parsepatterns(opts['include']) excludepats = narrowspec.parsepatterns(opts['exclude']) # If necessary, ask the server to expand the narrowspec. 
includepats, excludepats = expandpull( pullop, includepats, excludepats) if not includepats and excludepats: # If nothing was included, we assume the user meant to include # everything, except what they asked to exclude. includepats = {'path:.'} narrowspec.save(pullop.repo, includepats, excludepats) # This will populate 'includepats' etc with the values from the # narrowspec we just saved. orig(pullop, kwargs) if opts.get('depth'): kwargs['depth'] = opts['depth'] wrappedextraprepare = extensions.wrappedfunction(exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen) def pullnarrow(orig, repo, *args, **kwargs): narrowrepo.wraprepo(repo.unfiltered(), opts_narrow) if isinstance(repo, repoview.repoview): repo.__class__.__bases__ = (repo.__class__.__bases__[0], repo.unfiltered().__class__) if opts_narrow: - repo.requirements.add(narrowrepo.requirement) + repo.requirements.add(narrowrepo.REQUIREMENT) repo._writerequirements() return orig(repo, *args, **kwargs) wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow) with wrappedextraprepare, wrappedpull: return orig(ui, repo, *args, **opts) def pullnarrowcmd(orig, ui, repo, *args, **opts): """Wraps pull command to allow modifying narrow spec.""" wrappedextraprepare = util.nullcontextmanager() - if narrowrepo.requirement in repo.requirements: + if narrowrepo.REQUIREMENT in repo.requirements: def pullbundle2extraprepare_widen(orig, pullop, kwargs): orig(pullop, kwargs) if opts.get('depth'): kwargs['depth'] = opts['depth'] wrappedextraprepare = extensions.wrappedfunction(exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen) with wrappedextraprepare: return orig(ui, repo, *args, **opts) def archivenarrowcmd(orig, ui, repo, *args, **opts): """Wraps archive command to narrow the default includes.""" - if narrowrepo.requirement in repo.requirements: + if narrowrepo.REQUIREMENT in repo.requirements: repo_includes, repo_excludes = repo.narrowpats includes = set(opts.get('include', [])) 
excludes = set(opts.get('exclude', [])) includes, excludes = narrowspec.restrictpatterns( includes, excludes, repo_includes, repo_excludes) if includes: opts['include'] = includes if excludes: opts['exclude'] = excludes return orig(ui, repo, *args, **opts) def pullbundle2extraprepare(orig, pullop, kwargs): repo = pullop.repo - if narrowrepo.requirement not in repo.requirements: + if narrowrepo.REQUIREMENT not in repo.requirements: return orig(pullop, kwargs) if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps: raise error.Abort(_("server doesn't support narrow clones")) orig(pullop, kwargs) kwargs['narrow'] = True include, exclude = repo.narrowpats kwargs['oldincludepats'] = include kwargs['oldexcludepats'] = exclude kwargs['includepats'] = include kwargs['excludepats'] = exclude kwargs['known'] = [node.hex(ctx.node()) for ctx in repo.set('::%ln', pullop.common) if ctx.node() != node.nullid] if not kwargs['known']: # Mercurial serialized an empty list as '' and deserializes it as # [''], so delete it instead to avoid handling the empty string on the # server. del kwargs['known'] extensions.wrapfunction(exchange,'_pullbundle2extraprepare', pullbundle2extraprepare) def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, newincludes, newexcludes, force): oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) newmatch = narrowspec.match(repo.root, newincludes, newexcludes) # This is essentially doing "hg outgoing" to find all local-only # commits. We will then check that the local-only commits don't # have any changes to files that will be untracked. 
unfi = repo.unfiltered() outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc) ui.status(_('looking for local changes to affected paths\n')) localnodes = [] for n in itertools.chain(outgoing.missing, outgoing.excluded): if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): localnodes.append(n) revstostrip = unfi.revs('descendants(%ln)', localnodes) hiddenrevs = repoview.filterrevs(repo, 'visible') visibletostrip = list(repo.changelog.node(r) for r in (revstostrip - hiddenrevs)) if visibletostrip: ui.status(_('The following changeset(s) or their ancestors have ' 'local changes not on the remote:\n')) maxnodes = 10 if ui.verbose or len(visibletostrip) <= maxnodes: for n in visibletostrip: ui.status('%s\n' % node.short(n)) else: for n in visibletostrip[:maxnodes]: ui.status('%s\n' % node.short(n)) ui.status(_('...and %d more, use --verbose to list all\n') % (len(visibletostrip) - maxnodes)) if not force: raise error.Abort(_('local changes found'), hint=_('use --force-delete-local-changes to ' 'ignore')) if revstostrip: tostrip = [unfi.changelog.node(r) for r in revstostrip] if repo['.'].node() in tostrip: # stripping working copy, so move to a different commit first urev = max(repo.revs('(::%n) - %ln + null', repo['.'].node(), visibletostrip)) hg.clean(repo, urev) repair.strip(ui, unfi, tostrip, topic='narrow') todelete = [] for f, f2, size in repo.store.datafiles(): if f.startswith('data/'): file = f[5:-2] if not newmatch(file): todelete.append(f) elif f.startswith('meta/'): dir = f[5:-13] dirs = ['.'] + sorted(util.dirs({dir})) + [dir] include = True for d in dirs: visit = newmatch.visitdir(d) if not visit: include = False break if visit == 'all': break if not include: todelete.append(f) repo.destroying() with repo.transaction("narrowing"): for f in todelete: ui.status(_('deleting %s\n') % f) util.unlinkpath(repo.svfs.join(f)) repo.store.markremoved(f) for f in repo.dirstate: if not newmatch(f): repo.dirstate.drop(f) 
repo.wvfs.unlinkpath(f) repo.setnarrowpats(newincludes, newexcludes) repo.destroyed() def _widen(ui, repo, remote, commoninc, newincludes, newexcludes): newmatch = narrowspec.match(repo.root, newincludes, newexcludes) # TODO(martinvonz): Get expansion working with widening/narrowing. if narrowspec.needsexpansion(newincludes): raise error.Abort('Expansion not yet supported on pull') def pullbundle2extraprepare_widen(orig, pullop, kwargs): orig(pullop, kwargs) # The old{in,ex}cludepats have already been set by orig() kwargs['includepats'] = newincludes kwargs['excludepats'] = newexcludes wrappedextraprepare = extensions.wrappedfunction(exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen) # define a function that narrowbundle2 can call after creating the # backup bundle, but before applying the bundle from the server def setnewnarrowpats(): repo.setnarrowpats(newincludes, newexcludes) repo.setnewnarrowpats = setnewnarrowpats ds = repo.dirstate p1, p2 = ds.p1(), ds.p2() with ds.parentchange(): ds.setparents(node.nullid, node.nullid) common = commoninc[0] with wrappedextraprepare: exchange.pull(repo, remote, heads=common) with ds.parentchange(): ds.setparents(p1, p2) actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} addgaction = actions['g'].append mf = repo['.'].manifest().matches(newmatch) for f, fn in mf.iteritems(): if f not in repo.dirstate: addgaction((f, (mf.flags(f), False), "add from widened narrow clone")) merge.applyupdates(repo, actions, wctx=repo[None], mctx=repo['.'], overwrite=False) merge.recordupdates(repo, actions, branchmerge=False) # TODO(rdamazio): Make new matcher format and update description @command('tracked', [('', 'addinclude', [], _('new paths to include')), ('', 'removeinclude', [], _('old paths to no longer include')), ('', 'addexclude', [], _('new paths to exclude')), ('', 'removeexclude', [], _('old paths to no longer exclude')), ('', 'clear', False, _('whether to replace the existing narrowspec')), 
('', 'force-delete-local-changes', False, _('forces deletion of local changes when narrowing')), ] + commands.remoteopts, _('[OPTIONS]... [REMOTE]'), inferrepo=True) def trackedcmd(ui, repo, remotepath=None, *pats, **opts): """show or change the current narrowspec With no argument, shows the current narrowspec entries, one per line. Each line will be prefixed with 'I' or 'X' for included or excluded patterns, respectively. The narrowspec is composed of expressions to match remote files and/or directories that should be pulled into your client. The narrowspec has *include* and *exclude* expressions, with excludes always trumping includes: that is, if a file matches an exclude expression, it will be excluded even if it also matches an include expression. Excluding files that were never included has no effect. Each included or excluded entry is in the format described by 'hg help patterns'. The options allow you to add or remove included and excluded expressions. If --clear is specified, then all previous includes and excludes are DROPPED and replaced by the new ones specified to --addinclude and --addexclude. If --clear is specified without any further options, the narrowspec will be empty and will not match any files. """ - if narrowrepo.requirement not in repo.requirements: + if narrowrepo.REQUIREMENT not in repo.requirements: ui.warn(_('The narrow command is only supported on repositories cloned' ' with --narrow.\n')) return 1 # Before supporting, decide whether "hg tracked --clear" should mean # tracking no paths or all paths. 
if opts['clear']: ui.warn(_('The --clear option is not yet supported.\n')) return 1 if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']): raise error.Abort('Expansion not yet supported on widen/narrow') addedincludes = narrowspec.parsepatterns(opts['addinclude']) removedincludes = narrowspec.parsepatterns(opts['removeinclude']) addedexcludes = narrowspec.parsepatterns(opts['addexclude']) removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) widening = addedincludes or removedexcludes narrowing = removedincludes or addedexcludes only_show = not widening and not narrowing # Only print the current narrowspec. if only_show: include, exclude = repo.narrowpats ui.pager('tracked') fm = ui.formatter('narrow', opts) for i in sorted(include): fm.startitem() fm.write('status', '%s ', 'I', label='narrow.included') fm.write('pat', '%s\n', i, label='narrow.included') for i in sorted(exclude): fm.startitem() fm.write('status', '%s ', 'X', label='narrow.excluded') fm.write('pat', '%s\n', i, label='narrow.excluded') fm.end() return 0 with repo.wlock(), repo.lock(): cmdutil.bailifchanged(repo) # Find the revisions we have in common with the remote. These will # be used for finding local-only changes for narrowing. They will # also define the set of revisions to update for widening. 
remotepath = ui.expandpath(remotepath or 'default') url, branches = hg.parseurl(remotepath) ui.status(_('comparing with %s\n') % util.hidepassword(url)) remote = hg.peer(repo, opts, url) commoninc = discovery.findcommonincoming(repo, remote) oldincludes, oldexcludes = repo.narrowpats if narrowing: newincludes = oldincludes - removedincludes newexcludes = oldexcludes | addedexcludes _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, newincludes, newexcludes, opts['force_delete_local_changes']) # _narrow() updated the narrowspec and _widen() below needs to # use the updated values as its base (otherwise removed includes # and addedexcludes will be lost in the resulting narrowspec) oldincludes = newincludes oldexcludes = newexcludes if widening: newincludes = oldincludes | addedincludes newexcludes = oldexcludes - removedexcludes _widen(ui, repo, remote, commoninc, newincludes, newexcludes) return 0 diff --git a/hgext/narrow/narrowrepo.py b/hgext/narrow/narrowrepo.py --- a/hgext/narrow/narrowrepo.py +++ b/hgext/narrow/narrowrepo.py @@ -1,110 +1,110 @@ # narrowrepo.py - repository which supports narrow revlogs, lazy loading # # Copyright 2017 Google, Inc. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from __future__ import absolute_import from mercurial import ( bundlerepo, localrepo, match as matchmod, scmutil, ) from .. import ( share, ) from . 
import ( narrowrevlog, narrowspec, ) -requirement = 'narrowhg' +REQUIREMENT = 'narrowhg' def wrappostshare(orig, sourcerepo, destrepo, **kwargs): orig(sourcerepo, destrepo, **kwargs) - if requirement in sourcerepo.requirements: + if REQUIREMENT in sourcerepo.requirements: with destrepo.wlock(): with destrepo.vfs('shared', 'a') as fp: fp.write(narrowspec.FILENAME + '\n') def unsharenarrowspec(orig, ui, repo, repopath): - if (requirement in repo.requirements + if (REQUIREMENT in repo.requirements and repo.path == repopath and repo.shared()): srcrepo = share._getsrcrepo(repo) with srcrepo.vfs(narrowspec.FILENAME) as f: spec = f.read() with repo.vfs(narrowspec.FILENAME, 'w') as f: f.write(spec) return orig(ui, repo, repopath) def wraprepo(repo, opts_narrow): """Enables narrow clone functionality on a single local repository.""" cacheprop = localrepo.storecache if isinstance(repo, bundlerepo.bundlerepository): # We have to use a different caching property decorator for # bundlerepo because storecache blows up in strange ways on a # bundlerepo. Fortunately, there's no risk of data changing in # a bundlerepo. 
cacheprop = lambda name: localrepo.unfilteredpropertycache class narrowrepository(repo.__class__): def _constructmanifest(self): manifest = super(narrowrepository, self)._constructmanifest() narrowrevlog.makenarrowmanifestrevlog(manifest, repo) return manifest @cacheprop('00manifest.i') def manifestlog(self): mfl = super(narrowrepository, self).manifestlog narrowrevlog.makenarrowmanifestlog(mfl, self) return mfl def file(self, f): fl = super(narrowrepository, self).file(f) narrowrevlog.makenarrowfilelog(fl, self.narrowmatch()) return fl @localrepo.repofilecache(narrowspec.FILENAME) def narrowpats(self): return narrowspec.load(self) @localrepo.repofilecache(narrowspec.FILENAME) def _narrowmatch(self): include, exclude = self.narrowpats if not opts_narrow and not include and not exclude: return matchmod.always(self.root, '') return narrowspec.match(self.root, include=include, exclude=exclude) # TODO(martinvonz): make this property-like instead? def narrowmatch(self): return self._narrowmatch def setnarrowpats(self, newincludes, newexcludes): narrowspec.save(self, newincludes, newexcludes) self.invalidate(clearfilecache=True) # I'm not sure this is the right place to do this filter. # context._manifestmatches() would probably be better, or perhaps # move it to a later place, in case some of the callers do want to know # which directories changed. This seems to work for now, though. def status(self, *args, **kwargs): s = super(narrowrepository, self).status(*args, **kwargs) narrowmatch = self.narrowmatch() modified = filter(narrowmatch, s.modified) added = filter(narrowmatch, s.added) removed = filter(narrowmatch, s.removed) deleted = filter(narrowmatch, s.deleted) unknown = filter(narrowmatch, s.unknown) ignored = filter(narrowmatch, s.ignored) clean = filter(narrowmatch, s.clean) return scmutil.status(modified, added, removed, deleted, unknown, ignored, clean) repo.__class__ = narrowrepository