Encapsulate reading in a classmethod, to make it clear what kind of object is
being handled.
This is part of a stack of refactoring changes to help performance improvements
down the line.
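In miniature, the change moves a module-level reader function into an alternate constructor (a classmethod) on the class it builds, so call sites say which type they are getting back. A simplified sketch of the pattern only (Cache is a placeholder; fromfile mirrors the name used in the patch below, but this is not the actual Mercurial code):

    class Cache(dict):
        @classmethod
        def fromfile(cls, path):
            # the reader now lives on the class: callers write Cache.fromfile(...)
            # and subclasses automatically construct instances of themselves
            cache = cls()
            try:
                with open(path) as f:
                    for line in f:
                        key, _, value = line.rstrip('\n').partition(' ')
                        cache[key] = value
            except (IOError, OSError):
                return None
            return cache

Previously the equivalent logic would be a free function, read(path), whose return type is not obvious from the call site.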
Reviewers: pulkit, hg-reviewers
Automatic diff as part of commit; lint not applicable.
Automatic diff as part of commit; unit tests not applicable.
Inline comments on contrib/perf.py, lines 2412–2418:

martinvonz: As I was recently reminded by Yuya, the perf commands are supposed to be compatible with older versions of hg too, and this doesn't seem to be compatible with the branchmap.read() version.

mjpieters: Why is that? @yuja, can you elaborate? This is version-controlled code that changes in lock-step with the branchmap module; adding backwards compatibility tests here would add a costly maintenance burden. If you have an older revision of the branchmap module, you also have an older revision of the perf code.

martinvonz: I'm pretty sure the idea is that you should be able to enable the latest perf extension and run different versions of hg (perhaps versions that you had already built) against it to compare performance.

Something like that IIRC. A perf function itself can be improved in a way that the resulting number isn't comparable with the one of older perf codes. It's documented in the header. 4533f5b47949
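The updated diff below resolves this by feature-detecting the new API at runtime instead of assuming a particular hg version, so the same perf.py works against both old and new branchmap modules. The two equivalent idioms it uses, in sketch form (reader is a placeholder name; util and branchmap are the modules perf.py already imports):

    # look before you leap: check for the new classmethod up front
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        reader = branchmap.branchcache.fromfile   # newer hg
    else:
        reader = branchmap.read                   # older hg

    # or: try the new attribute and fall back on AttributeError
    try:
        reader = branchmap.branchcache.fromfile
    except AttributeError:
        reader = branchmap.read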
Affected files:
M  contrib/perf.py (21 lines)
M  mercurial/branchmap.py (119 lines)
contrib/perf.py

     # warm the cache
     if not full:
         for name in allfilters:
             repo.filtered(name).branchmap()
     if not filternames or b'unfiltered' in filternames:
         # add unfiltered
         allfilters.append(None)
-    branchcacheread = safeattrsetter(branchmap, b'read')
+    if util.safehasattr(branchmap.branchcache, 'fromfile'):
+        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
+        branchcacheread.set(classmethod(lambda *args: None))
+    else:
+        # older versions
+        branchcacheread = safeattrsetter(branchmap, b'read')
+        branchcacheread.set(lambda *args: None)
     branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
-    branchcacheread.set(lambda repo: None)
-    branchcachewrite.set(lambda bc, repo: None)
+    branchcachewrite.set(lambda *args: None)
     try:
         for name in allfilters:
             printname = name
             if name is None:
                 printname = b'unfiltered'
             timer(getbranchmap(name), title=str(printname))
     finally:
         branchcacheread.restore()

     subsettable = getbranchmapsubsettable()
     if filter is None:
         repo = repo.unfiltered()
     else:
         repo = repoview.repoview(repo, filter)
     repo.branchmap()  # make sure we have a relevant, up to date branchmap
+    try:
+        fromfile = branchmap.branchcache.fromfile
+    except AttributeError:
+        # older versions
+        fromfile = branchmap.read
     currentfilter = filter
     # try once without timer, the filter may not be cached
-    while branchmap.read(repo) is None:
+    while fromfile(repo) is None:
         currentfilter = subsettable.get(currentfilter)
         if currentfilter is None:
             raise error.Abort(b'No branchmap cached for %s repo'
                               % (filter or b'unfiltered'))
         repo = repo.filtered(currentfilter)
     timer, fm = gettimer(ui, opts)
     def setup():
         if clearrevlogs:
             clearchangelog(repo)
     def bench():
-        branchmap.read(repo)
+        fromfile(repo)
     timer(bench, setup=setup)
     fm.end()

 @command(b'perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo

     Result is the number of markers in the repo."""
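One detail in the first perf.py hunk above: the no-op stub for the new API is installed as classmethod(lambda *args: None) rather than a bare lambda, so the replacement behaves like the classmethod it is standing in for no matter how the rest of hg invokes it. A tiny standalone illustration of that stubbing pattern (hypothetical Cache class, not Mercurial code):

    class Cache(object):
        @classmethod
        def fromfile(cls, repo):
            return 'expensive parse of %r' % (repo,)

    # benchmark setup: grab the real descriptor, then stub reading out so the
    # timed code never touches the disk
    original = Cache.__dict__['fromfile']
    Cache.fromfile = classmethod(lambda *args: None)

    assert Cache.fromfile('repo') is None      # class-level call
    assert Cache().fromfile('repo') is None    # instance-level call

    # benchmark teardown: restore the real classmethod
    Cache.fromfile = original
    assert Cache.fromfile('repo') == "expensive parse of 'repo'"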
mercurial/branchmap.py

 from .utils import (
     stringutil,
 )

 calcsize = struct.calcsize
 pack_into = struct.pack_into
 unpack_from = struct.unpack_from

-def _filename(repo):
-    """name of a branchcache file for a given repo or repoview"""
-    filename = "branch2"
-    if repo.filtername:
-        filename = '%s-%s' % (filename, repo.filtername)
-    return filename
-
-def read(repo):
-    f = None
-    try:
-        f = repo.cachevfs(_filename(repo))
-        lineiter = iter(f)
-        cachekey = next(lineiter).rstrip('\n').split(" ", 2)
-        last, lrev = cachekey[:2]
-        last, lrev = bin(last), int(lrev)
-        filteredhash = None
-        if len(cachekey) > 2:
-            filteredhash = bin(cachekey[2])
-        bcache = branchcache(tipnode=last, tiprev=lrev,
-                             filteredhash=filteredhash)
-        if not bcache.validfor(repo):
-            # invalidate the cache
-            raise ValueError(r'tip differs')
-        cl = repo.changelog
-        for l in lineiter:
-            l = l.rstrip('\n')
-            if not l:
-                continue
-            node, state, label = l.split(" ", 2)
-            if state not in 'oc':
-                raise ValueError(r'invalid branch state')
-            label = encoding.tolocal(label.strip())
-            node = bin(node)
-            if not cl.hasnode(node):
-                raise ValueError(
-                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
-            bcache.setdefault(label, []).append(node)
-            if state == 'c':
-                bcache._closednodes.add(node)
-    except (IOError, OSError):
-        return None
-    except Exception as inst:
-        if repo.ui.debugflag:
-            msg = 'invalid branchheads cache'
-            if repo.filtername is not None:
-                msg += ' (%s)' % repo.filtername
-            msg += ': %s\n'
-            repo.ui.debug(msg % pycompat.bytestr(inst))
-        bcache = None
-    finally:
-        if f:
-            f.close()
-    return bcache

 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
 # This create and ordering used for branchmap purpose.
 # the ordering may be partial
 subsettable = {None: 'visible',
                'visible-hidden': 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}

 def updatecache(repo):
     cl = repo.changelog
     filtername = repo.filtername
     bcache = repo._branchcaches.get(filtername)
     revs = []
     if bcache is None or not bcache.validfor(repo):
-        bcache = read(repo)
+        bcache = branchcache.fromfile(repo)
         if bcache is None:
             subsetname = subsettable.get(filtername)
             if subsetname is None:
                 bcache = branchcache()
             else:
                 subset = repo.filtered(subsetname)
                 bcache = subset.branchmap().copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
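The nearest-subset mapping above is what lets updatecache() fall back to a coarser filter's cached branchmap when the requested one is missing; the second perf.py hunk above walks the same mapping via subsettable.get(currentfilter). A small self-contained sketch of following that chain, reusing the values shown above:

    subsettable = {None: 'visible',
                   'visible-hidden': 'visible',
                   'visible': 'served',
                   'served': 'immutable',
                   'immutable': 'base'}

    # each filter falls back to its nearest subset until 'base' is reached
    name, chain = 'visible', ['visible']
    while name in subsettable:
        name = subsettable[name]
        chain.append(name)
    assert chain == ['visible', 'served', 'immutable', 'base']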

     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
     included that hashes the hashes of all filtered revisions.

     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
     """

+    @classmethod
+    def fromfile(cls, repo):
+        f = None
+        try:
+            f = repo.cachevfs(cls._filename(repo))
+            lineiter = iter(f)
+            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
+            last, lrev = cachekey[:2]
+            last, lrev = bin(last), int(lrev)
+            filteredhash = None
+            if len(cachekey) > 2:
+                filteredhash = bin(cachekey[2])
+            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
+            if not bcache.validfor(repo):
+                # invalidate the cache
+                raise ValueError(r'tip differs')
+            cl = repo.changelog
+            for line in lineiter:
+                line = line.rstrip('\n')
+                if not line:
+                    continue
+                node, state, label = line.split(" ", 2)
+                if state not in 'oc':
+                    raise ValueError(r'invalid branch state')
+                label = encoding.tolocal(label.strip())
+                node = bin(node)
+                if not cl.hasnode(node):
+                    raise ValueError(
+                        r'node %s does not exist' % pycompat.sysstr(hex(node)))
+                bcache.setdefault(label, []).append(node)
+                if state == 'c':
+                    bcache._closednodes.add(node)
+        except (IOError, OSError):
+            return None
+        except Exception as inst:
+            if repo.ui.debugflag:
+                msg = 'invalid branchheads cache'
+                if repo.filtername is not None:
+                    msg += ' (%s)' % repo.filtername
+                msg += ': %s\n'
+                repo.ui.debug(msg % pycompat.bytestr(inst))
+            bcache = None
+        finally:
+            if f:
+                f.close()
+        return bcache
+
+    @staticmethod
+    def _filename(repo):
+        """name of a branchcache file for a given repo or repoview"""
+        filename = "branch2"
+        if repo.filtername:
+            filename = '%s-%s' % (filename, repo.filtername)
+        return filename

     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
         super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch

     def copy(self):
         """return an deep copy of the branchcache object"""
         return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                            self._closednodes)

     def write(self, repo):
         try:
-            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
+            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
                     nodecount += 1
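For reference, the on-disk cache that fromfile() parses and write() produces (described in the class docstring above) is line-oriented: a first line carrying the tip node hex, the tip revision, and an optional filtered hash, followed by one 'node state label' line per branch head. A hypothetical head line and the same split that fromfile() applies (the node hex here is a made-up placeholder):

    line = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa o default\n'
    node_hex, state, label = line.rstrip('\n').split(' ', 2)
    assert state in 'oc'        # 'o' = open head, 'c' = closed head
    assert label == 'default'   # branch name, stored in local encoding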