Details
- Reviewers
- None
- Group Reviewers
- hg-reviewers
- Commits
- rHGb137a6793c51: branchcache: make entries a private attribute
Diff Detail
- Repository
- rHG Mercurial
- Lint
- Lint Skipped
- Unit
- Unit Tests Skipped
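The patch renames the branchcache's entries dict to _entries, signaling that callers should go through the object's mapping-style interface (__getitem__, __setitem__, __iter__, iteritems, hasbranch, iterheads) rather than reaching into the attribute. A minimal sketch of what calling code looks like after the change; cache, node, and the branch names below are illustrative placeholders, not part of the patch:

    # illustrative only: assume 'cache' is an existing branchcache instance
    # and 'node' is a changelog node known to the repository
    cache[b'default'] = [node]          # __setitem__, instead of cache.entries[b'default'] = [node]
    if cache.hasbranch(b'default'):     # instead of: b'default' in cache.entries
        heads = cache[b'default']       # __getitem__
    for label in cache:                 # __iter__ still yields branch names
        heads = cache[label]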
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
         if closednodes is None:
             self._closednodes = set()
         else:
             self._closednodes = closednodes
-        self.entries = dict(entries)
+        self._entries = dict(entries)

     def __iter__(self):
-        return iter(self.entries)
+        return iter(self._entries)

     def __setitem__(self, key, value):
-        self.entries[key] = value
+        self._entries[key] = value

     def __getitem__(self, key):
-        return self.entries[key]
+        return self._entries[key]

     def iteritems(self):
-        return self.entries.iteritems()
+        return self._entries.iteritems()

     def hasbranch(self, label):
         """ checks whether a branch of this name exists or not """
-        return label in self.entries
+        return label in self._entries
     @classmethod
     def fromfile(cls, repo):
         f = None
         try:
             f = repo.cachevfs(cls._filename(repo))
             lineiter = iter(f)
             cachekey = next(lineiter).rstrip('\n').split(" ", 2)

             node, state, label = line.split(" ", 2)
             if state not in 'oc':
                 raise ValueError(r'invalid branch state')
             label = encoding.tolocal(label.strip())
             node = bin(node)
             if not cl.hasnode(node):
                 raise ValueError(
                     r'node %s does not exist' % pycompat.sysstr(hex(node)))
-            self.entries.setdefault(label, []).append(node)
+            self._entries.setdefault(label, []).append(node)
             if state == 'c':
                 self._closednodes.add(node)
     @staticmethod
     def _filename(repo):
         """name of a branchcache file for a given repo or repoview"""
         filename = "branch2"
         if repo.filtername:

         return heads

     def iterbranches(self):
         for bn, heads in self.iteritems():
             yield (bn, heads) + self._branchtip(heads)

     def iterheads(self):
         """ returns all the heads """
-        return self.entries.itervalues()
+        return self._entries.itervalues()

     def copy(self):
         """return an deep copy of the branchcache object"""
         return branchcache(
-            self.entries, self.tipnode, self.tiprev, self.filteredhash,
+            self._entries, self.tipnode, self.tiprev, self.filteredhash,
             self._closednodes)
     def write(self, repo):
         try:
             f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 label = encoding.fromlocal(label)
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
                         state = 'o'
                     f.write("%s %s %s\n" % (hex(node), state, label))
             f.close()
             repo.ui.log('branchcache',
                         'wrote %s branch cache with %d labels and %d nodes\n',
-                        repo.filtername, len(self.entries), nodecount)
+                        repo.filtername, len(self._entries), nodecount)
         except (IOError, OSError, error.Abort) as inst:
             # Abort may be raised by read only opener, so log and continue
             repo.ui.debug("couldn't write branch cache: %s\n" %
                           stringutil.forcebytestr(inst))
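For context, write() above emits one key line (the tip node hash, the tip revision, and, when present, the filtered hash) followed by one "node state label" line per branch head, where the state is 'o' for an open head and 'c' for a head that closes its branch; fromfile() parses the same layout back. A rough sketch of such a cache file, with placeholders instead of real hashes:

    <tipnode hex> <tiprev> [<filteredhash hex>]
    <head node hex> o default
    <head node hex> c some-closed-branch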
     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of

         # fetch current topological heads to speed up filtering
         topoheads = set(cl.headrevs())

         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in newbranches.iteritems():
-            bheads = self.entries.setdefault(branch, [])
+            bheads = self._entries.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)

             # This have been tested True on all internal usage of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
             bheadset.update(newheadrevs)

             # This prunes out two kinds of heads - heads that are superseded by