diff --git a/contrib/perf.py b/contrib/perf.py
--- a/contrib/perf.py
+++ b/contrib/perf.py
@@ -2126,7 +2126,7 @@
             if full:
                 view._branchcaches.clear()
             else:
-                view._branchcaches.pop(filtername, None)
+                view._branchcaches._per_filter.pop(filtername, None)
             view.branchmap()
         return d
     # add filter in smaller subset to bigger subset
@@ -2153,10 +2153,10 @@
     # add unfiltered
     allfilters.append(None)

-    branchcacheread = safeattrsetter(branchmap, b'read')
+    branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
     branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
-    branchcacheread.set(lambda repo: None)
-    branchcachewrite.set(lambda bc, repo: None)
+    branchcacheread.set(classmethod(lambda *args: None))
+    branchcachewrite.set(lambda *args: None)
     try:
         for name in allfilters:
             printname = name
diff --git a/mercurial/branchmap.py b/mercurial/branchmap.py
--- a/mercurial/branchmap.py
+++ b/mercurial/branchmap.py
@@ -30,65 +30,8 @@
 pack_into = struct.pack_into
 unpack_from = struct.unpack_from

-def _filename(repo):
-    """name of a branchcache file for a given repo or repoview"""
-    filename = "branch2"
-    if repo.filtername:
-        filename = '%s-%s' % (filename, repo.filtername)
-    return filename
-def read(repo):
-    f = None
-    try:
-        f = repo.cachevfs(_filename(repo))
-        lineiter = iter(f)
-        cachekey = next(lineiter).rstrip('\n').split(" ", 2)
-        last, lrev = cachekey[:2]
-        last, lrev = bin(last), int(lrev)
-        filteredhash = None
-        if len(cachekey) > 2:
-            filteredhash = bin(cachekey[2])
-        partial = branchcache(tipnode=last, tiprev=lrev,
-                              filteredhash=filteredhash)
-        if not partial.validfor(repo):
-            # invalidate the cache
-            raise ValueError(r'tip differs')
-        cl = repo.changelog
-        for l in lineiter:
-            l = l.rstrip('\n')
-            if not l:
-                continue
-            node, state, label = l.split(" ", 2)
-            if state not in 'oc':
-                raise ValueError(r'invalid branch state')
-            label = encoding.tolocal(label.strip())
-            node = bin(node)
-            if not cl.hasnode(node):
-                raise ValueError(
-                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
-            partial.setdefault(label, []).append(node)
-            if state == 'c':
-                partial._closednodes.add(node)
-
-    except (IOError, OSError):
-        return None
-
-    except Exception as inst:
-        if repo.ui.debugflag:
-            msg = 'invalid branchheads cache'
-            if repo.filtername is not None:
-                msg += ' (%s)' % repo.filtername
-            msg += ': %s\n'
-            repo.ui.debug(msg % pycompat.bytestr(inst))
-        partial = None
-
-    finally:
-        if f:
-            f.close()
-
-    return partial
-
-### Nearest subset relation
+# ## Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
@@ -100,65 +43,90 @@
                'served': 'immutable',
                'immutable': 'base'}

-def updatecache(repo):
-    cl = repo.changelog
-    filtername = repo.filtername
-    partial = repo._branchcaches.get(filtername)
+
+class BranchMapCache(object):
+    """Cache mapping"""
+    def __init__(self):
+        self._per_filter = {}
+
+    def __getitem__(self, repo):
+        self.updatecache(repo)
+        return self._per_filter[repo.filtername]

-    revs = []
-    if partial is None or not partial.validfor(repo):
-        partial = read(repo)
-        if partial is None:
+    def updatecache(self, repo):
+        """Update the cache for the given filtered view on a repository"""
+        # This can trigger updates for the caches for subsets of the filtered
+        # view, e.g. when there is no cache for this filtered view or the cache
+        # is stale.
+
+        cl = repo.changelog
+        filtername = repo.filtername
+        frbm = self._per_filter.get(filtername)
+        if frbm is None or not frbm.validfor(repo):
+            # cache object missing or cache object stale? Read from disk
+            frbm = branchcache.fromfile(repo)
+
+        revs = []
+        if frbm is None:
+            # no (fresh) cache available anymore, perhaps we can re-use
+            # the cache for a subset, then extend that to add info on missing
+            # revisions.
             subsetname = subsettable.get(filtername)
-            if subsetname is None:
-                partial = branchcache()
-            else:
+            if subsetname is not None:
                 subset = repo.filtered(subsetname)
-                partial = subset.branchmap().copy()
+                frbm = self[subset].copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
-                revs.extend(r for r in extrarevs if r <= partial.tiprev)
-    revs.extend(cl.revs(start=partial.tiprev + 1))
-    if revs:
-        partial.update(repo, revs)
-        partial.write(repo)
+                revs.extend(r for r in extrarevs if r <= frbm.tiprev)
+            else:
+                # nothing to fall back on, start empty.
+                frbm = branchcache()

-    assert partial.validfor(repo), filtername
-    repo._branchcaches[repo.filtername] = partial
-
-def replacecache(repo, bm):
-    """Replace the branchmap cache for a repo with a branch mapping.
+        revs.extend(cl.revs(start=frbm.tiprev + 1))

-    This is likely only called during clone with a branch map from a remote.
-    """
-    cl = repo.changelog
-    clrev = cl.rev
-    clbranchinfo = cl.branchinfo
-    rbheads = []
-    closed = []
-    for bheads in bm.itervalues():
-        rbheads.extend(bheads)
-        for h in bheads:
-            r = clrev(h)
-            b, c = clbranchinfo(r)
-            if c:
-                closed.append(h)
+        if revs:
+            frbm.update(repo, revs)
+
+        assert frbm.validfor(repo), filtername
+        self._per_filter[repo.filtername] = frbm
+
+    def replace(self, repo, remotebranchmap):
+        """Replace the branchmap cache for a repo with a branch mapping.
+
+        This is likely only called during clone with a branch map from a
+        remote.

-    if rbheads:
-        rtiprev = max((int(clrev(node))
-                       for node in rbheads))
-        cache = branchcache(bm,
-                            repo[rtiprev].node(),
-                            rtiprev,
-                            closednodes=closed)
+        """
+        cl = repo.changelog
+        clrev = cl.rev
+        clbranchinfo = cl.branchinfo
+        rbheads = []
+        closed = []
+        for bheads in remotebranchmap.itervalues():
+            rbheads += bheads
+            for h in bheads:
+                r = clrev(h)
+                b, c = clbranchinfo(r)
+                if c:
+                    closed.append(h)

-        # Try to stick it as low as possible
-        # filter above served are unlikely to be fetch from a clone
-        for candidate in ('base', 'immutable', 'served'):
-            rview = repo.filtered(candidate)
-            if cache.validfor(rview):
-                repo._branchcaches[candidate] = cache
-                cache.write(rview)
-                break
+        if rbheads:
+            rtiprev = max((int(clrev(node)) for node in rbheads))
+            cache = branchcache(
+                remotebranchmap, repo[rtiprev].node(), rtiprev,
+                closednodes=closed)
+
+            # Try to stick it as low as possible
+            # filter above served are unlikely to be fetch from a clone
+            for candidate in ('base', 'immutable', 'served'):
+                rview = repo.filtered(candidate)
+                if cache.validfor(rview):
+                    self._per_filter[candidate] = cache
+                    cache.write(rview)
+                    return
+
+    def clear(self):
+        self._per_filter.clear()
+

 class branchcache(dict):
     """A dict like object that hold branches heads cache.
@@ -180,7 +148,66 @@
     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
+    """
+    @classmethod
+    def fromfile(cls, repo):
+        f = None
+        try:
+            f = repo.cachevfs(cls._filename(repo))
+            lineiter = iter(f)
+            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
+            last, lrev = cachekey[:2]
+            last, lrev = bin(last), int(lrev)
+            filteredhash = None
+            if len(cachekey) > 2:
+                filteredhash = bin(cachekey[2])
+            frbm = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
+            if not frbm.validfor(repo):
+                # invalidate the cache
+                raise ValueError(r'tip differs')
+            cl = repo.changelog
+            for line in lineiter:
+                line = line.rstrip('\n')
+                if not line:
+                    continue
+                node, state, label = line.split(" ", 2)
+                if state not in 'oc':
+                    raise ValueError(r'invalid branch state')
+                label = encoding.tolocal(label.strip())
+                node = bin(node)
+                if not cl.hasnode(node):
+                    raise ValueError(
+                        r'node %s does not exist' % pycompat.sysstr(hex(node)))
+                frbm.setdefault(label, []).append(node)
+                if state == 'c':
+                    frbm._closednodes.add(node)
+
+        except (IOError, OSError):
+            return None
+
+        except Exception as inst:
+            if repo.ui.debugflag:
+                msg = 'invalid branchheads cache'
+                if repo.filtername is not None:
+                    msg += ' (%s)' % repo.filtername
+                msg += ': %s\n'
+                repo.ui.debug(msg % pycompat.bytestr(inst))
+            frbm = None
+
+        finally:
+            if f:
+                f.close()
+
+        return frbm
+
+    @staticmethod
+    def _filename(repo):
+        """name of a branchcache file for a given repo or repoview"""
+        filename = "branch2"
+        if repo.filtername:
+            filename = '%s-%s' % (filename, repo.filtername)
+        return filename

     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
@@ -200,11 +227,14 @@
         """Is the cache content valid regarding a repo

         - False when cached tipnode is unknown or if we detect a strip.
-        - True when cache is up to date or a subset of current repo."""
+        - True when cache is up to date or a subset of current repo.
+
+        """
         try:
-            return ((self.tipnode == repo.changelog.node(self.tiprev))
-                    and (self.filteredhash == \
-                         scmutil.filteredhash(repo, self.tiprev)))
+            return (
+                self.tipnode == repo.changelog.node(self.tiprev) and
+                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
+            )
         except IndexError:
             return False
@@ -241,12 +271,17 @@

     def copy(self):
         """return an deep copy of the branchcache object"""
-        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
-                           self._closednodes)
+        return type(self)(
+            self,
+            self.tipnode,
+            self.tiprev,
+            self.filteredhash,
+            self._closednodes
+        )

     def write(self, repo):
         try:
-            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
+            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
@@ -331,6 +366,15 @@
         repo.ui.log('branchcache',
                     'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)
+        self.write(repo)
+
+
+class remotebranchcache(branchcache):
+    """Branchmap info for a remote connection, should not write locally"""
+    def write(self, repo):
+        pass
+
+
 # Revision branch info cache
 _rbcversion = '-v1'
diff --git a/mercurial/discovery.py b/mercurial/discovery.py
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -238,7 +238,7 @@
     # D. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
-    newmap = branchmap.branchcache((branch, heads[1])
+    newmap = branchmap.remotebranchcache((branch, heads[1])
                                    for branch, heads in headssum.iteritems()
                                    if heads[0] is not None)
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -980,7 +980,7 @@
         self._dirstatevalidatewarned = False

-        self._branchcaches = {}
+        self._branchcaches = branchmap.BranchMapCache()
         self._revbranchcache = None
         self._filterpats = {}
         self._datafilters = {}
@@ -1499,8 +1499,7 @@
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]} with branchheads
         ordered by increasing revision number'''
-        branchmap.updatecache(self)
-        return self._branchcaches[self.filtername]
+        return self._branchcaches[self]

     @unfilteredmethod
     def revbranchcache(self):
@@ -2048,9 +2047,9 @@
             return

         if tr is None or tr.changes['origrepolen'] < len(self):
-            # updating the unfiltered branchmap should refresh all the others,
+            # accessing the 'served' branchmap should refresh all the others,
             self.ui.debug('updating the branch cache\n')
-            branchmap.updatecache(self.filtered('served'))
+            self.filtered('served').branchmap()

         if full:
             rbc = self.revbranchcache()
@@ -2068,7 +2067,7 @@
             # can't use delattr on proxy
             del self.__dict__[r'_tagscache']

-        self.unfiltered()._branchcaches.clear()
+        self._branchcaches.clear()
         self.invalidatevolatilesets()
         self._sparsesignaturecache.clear()
diff --git a/mercurial/statichttprepo.py b/mercurial/statichttprepo.py
--- a/mercurial/statichttprepo.py
+++ b/mercurial/statichttprepo.py
@@ -13,6 +13,7 @@
 from .i18n import _
 from . import (
+    branchmap,
     changelog,
     error,
     localrepo,
@@ -191,7 +192,7 @@
         self.changelog = changelog.changelog(self.svfs)
         self._tags = None
         self.nodetagscache = None
-        self._branchcaches = {}
+        self._branchcaches = branchmap.BranchMapCache()
         self._revbranchcache = None
         self.encodepats = None
         self.decodepats = None
diff --git a/mercurial/streamclone.py b/mercurial/streamclone.py
--- a/mercurial/streamclone.py
+++ b/mercurial/streamclone.py
@@ -174,7 +174,7 @@
         repo._writerequirements()

         if rbranchmap:
-            branchmap.replacecache(repo, rbranchmap)
+            repo._branchcaches.replace(repo, rbranchmap)

         repo.invalidate()
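The patch above replaces the module-level read/updatecache/replacecache helpers with a BranchMapCache object stored at repo._branchcaches, plus a remotebranchcache subclass whose write() is a no-op. As a rough, self-contained illustration of that shape only (not Mercurial's actual API; the Toy*/FakeRepo names below are invented for the sketch), the access pattern looks like this:

# Minimal sketch of the caching pattern introduced by the patch: one cache
# object per filter name, refreshed lazily on __getitem__, plus a "remote"
# variant whose write() is a no-op. All names here are illustrative only.

class ToyBranchCache(dict):
    def __init__(self, tiprev=-1):
        super(ToyBranchCache, self).__init__()
        self.tiprev = tiprev

    def validfor(self, repo):
        # the real branchcache also checks the tip node and a filtered hash
        return self.tiprev == repo.tiprev

    def update(self, repo):
        self.tiprev = repo.tiprev

    def write(self, repo):
        print('would write cache for filter %r' % repo.filtername)


class ToyRemoteBranchCache(ToyBranchCache):
    """Branch heads received from a peer; must never be written locally."""
    def write(self, repo):
        pass


class ToyBranchMapCache(object):
    """Per-filter caches, updated on access (mirrors BranchMapCache)."""
    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        cache = self._per_filter.get(repo.filtername)
        if cache is None or not cache.validfor(repo):
            cache = ToyBranchCache()
            cache.update(repo)
            cache.write(repo)
        self._per_filter[repo.filtername] = cache

    def clear(self):
        self._per_filter.clear()


class FakeRepo(object):
    def __init__(self, filtername, tiprev):
        self.filtername = filtername
        self.tiprev = tiprev


caches = ToyBranchMapCache()
print(caches[FakeRepo('served', 42)].tiprev)  # 42, computed then cached
caches.clear()  # drop everything, as localrepo now does via _branchcaches.clear()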