This is done using contrib/byteify-strings.py. There is just one extra change
done to make sure lines don't get longer than 80 chars.
- skip-blame because these are just b'' prefixes.
( )
hg-reviewers |
This is done using contrib/byteify-strings.py. There is just one extra change
done to make sure lines don't get longer than 80 chars.
Automatic diff as part of commit; lint not applicable. |
Automatic diff as part of commit; unit tests not applicable. |
return getattr(thing, attr, _undefined) is not _undefined | return getattr(thing, attr, _undefined) is not _undefined | ||||
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # os.name is a native str on every Python; comparing against b'nt'
    # would never match under Python 3 and Windows would silently fall
    # through to the low-resolution time.time
    util.timer = time.clock
else:
    util.timer = time.time
# for "historical portability":
# prefer cmdutil.formatteropts, fall back to commands.formatteropts,
# and finally to an empty option list: commands.formatteropts has only
# been available since 3.2 (7a7eed5176a4), even though formatting
# itself appeared in 2.2 (ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# prefer cmdutil.debugrevlogopts, fall back to commands.debugrevlogopts,
# and finally to a locally defined option list: the attribute has only
# been available since 3.7 (5606f7d0d063), even though
# cmdutil.openrevlog() appeared in 1.9 (a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, b'open changelog'),
                         (b'm', b'manifest', False, b'open manifest'),
                         (b'', b'dir', False, b'open directory manifest'),
                     ]))
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return every name of a command spec such as b"^log|history"."""
    stripped = cmd.lstrip(b"^")
    return stripped.split(b"|")
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    # getargspec() reports parameter names as native strings, so the
    # membership test must use 'norepo' — b'norepo' would never match on
    # Python 3 and the wrapper would be applied even when unnecessary,
    # silently dropping norepo support
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # every perf knob shares the same dynamic default, so register them
    # all in one pass
    for _knob in (b'presleep', b'stub', b'parentscount', b'all-timing'):
        configitem(b'perf', _knob,
                   default=mercurial.configitems.dynamicdefault,
        )
except (ImportError, AttributeError):
    pass
def getlen(ui):
    """Return a length function; a constant 1 when perf.stub is set."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda sequence: 1
    return len
def gettimer(ui, opts=None): | def gettimer(ui, opts=None): | ||||
"""return a timer function and formatter: (timer, formatter) | """return a timer function and formatter: (timer, formatter) | ||||
This function exists to gather the creation of formatter in a single | This function exists to gather the creation of formatter in a single | ||||
place instead of duplicating it in all performance commands.""" | place instead of duplicating it in all performance commands.""" | ||||
# enforce an idle period before execution to counteract power management | # enforce an idle period before execution to counteract power management | ||||
# experimental config: perf.presleep | # experimental config: perf.presleep | ||||
time.sleep(getint(ui, "perf", "presleep", 1)) | time.sleep(getint(ui, b"perf", b"presleep", 1)) | ||||
if opts is None: | if opts is None: | ||||
opts = {} | opts = {} | ||||
# redirect all to stderr unless buffer api is in use | # redirect all to stderr unless buffer api is in use | ||||
if not ui._buffers: | if not ui._buffers: | ||||
ui = ui.copy() | ui = ui.copy() | ||||
uifout = safeattrsetter(ui, 'fout', ignoremissing=True) | uifout = safeattrsetter(ui, b'fout', ignoremissing=True) | ||||
if uifout: | if uifout: | ||||
# for "historical portability": | # for "historical portability": | ||||
# ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d) | # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d) | ||||
uifout.set(ui.ferr) | uifout.set(ui.ferr) | ||||
# get a formatter | # get a formatter | ||||
uiformatter = getattr(ui, 'formatter', None) | uiformatter = getattr(ui, 'formatter', None) | ||||
if uiformatter: | if uiformatter: | ||||
fm = uiformatter('perf', opts) | fm = uiformatter(b'perf', opts) | ||||
else: | else: | ||||
# for "historical portability": | # for "historical portability": | ||||
# define formatter locally, because ui.formatter has been | # define formatter locally, because ui.formatter has been | ||||
# available since 2.2 (or ae5f92e154d3) | # available since 2.2 (or ae5f92e154d3) | ||||
from mercurial import node | from mercurial import node | ||||
class defaultformatter(object): | class defaultformatter(object): | ||||
"""Minimized composition of baseformatter and plainformatter | """Minimized composition of baseformatter and plainformatter | ||||
""" | """ | ||||
self._ui.write(deftext % fielddata, **opts) | self._ui.write(deftext % fielddata, **opts) | ||||
def condwrite(self, cond, fields, deftext, *fielddata, **opts): | def condwrite(self, cond, fields, deftext, *fielddata, **opts): | ||||
if cond: | if cond: | ||||
self._ui.write(deftext % fielddata, **opts) | self._ui.write(deftext % fielddata, **opts) | ||||
def plain(self, text, **opts): | def plain(self, text, **opts): | ||||
self._ui.write(text, **opts) | self._ui.write(text, **opts) | ||||
def end(self): | def end(self): | ||||
pass | pass | ||||
fm = defaultformatter(ui, 'perf', opts) | fm = defaultformatter(ui, b'perf', opts) | ||||
# stub function, runs code only once instead of in a loop | # stub function, runs code only once instead of in a loop | ||||
# experimental config: perf.stub | # experimental config: perf.stub | ||||
if ui.configbool("perf", "stub", False): | if ui.configbool(b"perf", b"stub", False): | ||||
return functools.partial(stub_timer, fm), fm | return functools.partial(stub_timer, fm), fm | ||||
# experimental config: perf.all-timing | # experimental config: perf.all-timing | ||||
displayall = ui.configbool("perf", "all-timing", False) | displayall = ui.configbool(b"perf", b"all-timing", False) | ||||
return functools.partial(_timer, fm, displayall=displayall), fm | return functools.partial(_timer, fm, displayall=displayall), fm | ||||
def stub_timer(fm, func, title=None):
    # perf.stub mode: invoke the benchmarked callable exactly once and
    # record no timing at all
    func()
def _timer(fm, func, title=None, displayall=False): | def _timer(fm, func, title=None, displayall=False): | ||||
gc.collect() | gc.collect() | ||||
results = [] | results = [] | ||||
if cstop - begin > 3 and count >= 100: | if cstop - begin > 3 and count >= 100: | ||||
break | break | ||||
if cstop - begin > 10 and count >= 3: | if cstop - begin > 10 and count >= 3: | ||||
break | break | ||||
fm.startitem() | fm.startitem() | ||||
if title: | if title: | ||||
fm.write('title', '! %s\n', title) | fm.write(b'title', b'! %s\n', title) | ||||
if r: | if r: | ||||
fm.write('result', '! result: %s\n', r) | fm.write(b'result', b'! result: %s\n', r) | ||||
def display(role, entry): | def display(role, entry): | ||||
prefix = '' | prefix = b'' | ||||
if role != 'best': | if role != b'best': | ||||
prefix = '%s.' % role | prefix = b'%s.' % role | ||||
fm.plain('!') | fm.plain(b'!') | ||||
fm.write(prefix + 'wall', ' wall %f', entry[0]) | fm.write(prefix + b'wall', b' wall %f', entry[0]) | ||||
fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2]) | fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2]) | ||||
fm.write(prefix + 'user', ' user %f', entry[1]) | fm.write(prefix + b'user', b' user %f', entry[1]) | ||||
fm.write(prefix + 'sys', ' sys %f', entry[2]) | fm.write(prefix + b'sys', b' sys %f', entry[2]) | ||||
fm.write(prefix + 'count', ' (%s of %d)', role, count) | fm.write(prefix + b'count', b' (%s of %d)', role, count) | ||||
fm.plain('\n') | fm.plain(b'\n') | ||||
results.sort() | results.sort() | ||||
min_val = results[0] | min_val = results[0] | ||||
display('best', min_val) | display(b'best', min_val) | ||||
if displayall: | if displayall: | ||||
max_val = results[-1] | max_val = results[-1] | ||||
display('max', max_val) | display(b'max', max_val) | ||||
avg = tuple([sum(x) / count for x in zip(*results)]) | avg = tuple([sum(x) / count for x in zip(*results)]) | ||||
display('avg', avg) | display(b'avg', avg) | ||||
median = results[len(results) // 2] | median = results[len(results) // 2] | ||||
display('median', median) | display(b'median', median) | ||||
# utilities for historical portability

def getint(ui, section, name, default):
    """Read an integer config value, parsing the raw string ourselves.

    ui.configint has only been available since 1.9 (fa2b596db182), so
    for "historical portability" we go through ui.config directly.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
def safeattrsetter(obj, name, ignoremissing=False): | def safeattrsetter(obj, name, ignoremissing=False): | ||||
"""Ensure that 'obj' has 'name' attribute before subsequent setattr | """Ensure that 'obj' has 'name' attribute before subsequent setattr | ||||
This function is aborted, if 'obj' doesn't have 'name' attribute | This function is aborted, if 'obj' doesn't have 'name' attribute | ||||
at runtime. This avoids overlooking removal of an attribute, which | at runtime. This avoids overlooking removal of an attribute, which | ||||
breaks assumption of performance measurement, in the future. | breaks assumption of performance measurement, in the future. | ||||
This function returns the object to (1) assign a new value, and | This function returns the object to (1) assign a new value, and | ||||
(2) restore an original value to the attribute. | (2) restore an original value to the attribute. | ||||
If 'ignoremissing' is true, missing 'name' attribute doesn't cause | If 'ignoremissing' is true, missing 'name' attribute doesn't cause | ||||
abortion, and this function returns None. This is useful to | abortion, and this function returns None. This is useful to | ||||
examine an attribute, which isn't ensured in all Mercurial | examine an attribute, which isn't ensured in all Mercurial | ||||
versions. | versions. | ||||
""" | """ | ||||
if not util.safehasattr(obj, name): | if not util.safehasattr(obj, name): | ||||
if ignoremissing: | if ignoremissing: | ||||
return None | return None | ||||
raise error.Abort(("missing attribute %s of %s might break assumption" | raise error.Abort((b"missing attribute %s of %s might break assumption" | ||||
" of performance measurement") % (name, obj)) | b" of performance measurement") % (name, obj)) | ||||
origvalue = getattr(obj, name) | origvalue = getattr(obj, name) | ||||
class attrutil(object): | class attrutil(object): | ||||
def set(self, newvalue): | def set(self, newvalue): | ||||
setattr(obj, name, newvalue) | setattr(obj, name, newvalue) | ||||
def restore(self): | def restore(self): | ||||
setattr(obj, name, origvalue) | setattr(obj, name, origvalue) | ||||
for mod in (branchmap, repoview): | for mod in (branchmap, repoview): | ||||
subsettable = getattr(mod, 'subsettable', None) | subsettable = getattr(mod, 'subsettable', None) | ||||
if subsettable: | if subsettable: | ||||
return subsettable | return subsettable | ||||
# bisecting in bcee63733aad::59a9f18d4587 can reach here (both | # bisecting in bcee63733aad::59a9f18d4587 can reach here (both | ||||
# branchmap and repoview modules exist, but subsettable attribute | # branchmap and repoview modules exist, but subsettable attribute | ||||
# doesn't) | # doesn't) | ||||
raise error.Abort(("perfbranchmap not available with this Mercurial"), | raise error.Abort((b"perfbranchmap not available with this Mercurial"), | ||||
hint="use 2.5 or later") | hint=b"use 2.5 or later") | ||||
def getsvfs(repo): | def getsvfs(repo): | ||||
"""Return appropriate object to access files under .hg/store | """Return appropriate object to access files under .hg/store | ||||
""" | """ | ||||
# for "historical portability": | # for "historical portability": | ||||
# repo.svfs has been available since 2.3 (or 7034365089bf) | # repo.svfs has been available since 2.3 (or 7034365089bf) | ||||
svfs = getattr(repo, 'svfs', None) | svfs = getattr(repo, 'svfs', None) | ||||
if svfs: | if svfs: | ||||
if vfs: | if vfs: | ||||
return vfs | return vfs | ||||
else: | else: | ||||
return getattr(repo, 'opener') | return getattr(repo, 'opener') | ||||
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Attribute names below are deliberately native strings: getattr()
    (via safehasattr) and vars() work with str attribute names on
    Python 3, so the b'' forms would either raise TypeError or never
    match, leaving the cache permanently warm.
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
# utilities to clear cache

def clearfilecache(repo, attrname):
    """Invalidate a filecache-backed attribute on the unfiltered repo."""
    target = repo.unfiltered()
    if attrname in vars(target):
        delattr(target, attrname)
    target._filecache.pop(attrname, None)
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """Benchmark walking the dirstate with the given patterns."""
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})
    def walk():
        entries = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                     ignored=False)
        return len(list(entries))
    timer(walk)
    fm.end()
@command('perfannotate', formatteropts) | @command(b'perfannotate', formatteropts) | ||||
def perfannotate(ui, repo, f, **opts): | def perfannotate(ui, repo, f, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
fc = repo['.'][f] | fc = repo[b'.'][f] | ||||
timer(lambda: len(fc.annotate(True))) | timer(lambda: len(fc.annotate(True))) | ||||
fm.end() | fm.end() | ||||
@command('perfstatus', | @command(b'perfstatus', | ||||
[('u', 'unknown', False, | [(b'u', b'unknown', False, | ||||
'ask status to look for unknown files')] + formatteropts) | b'ask status to look for unknown files')] + formatteropts) | ||||
def perfstatus(ui, repo, **opts): | def perfstatus(ui, repo, **opts): | ||||
#m = match.always(repo.root, repo.getcwd()) | #m = match.always(repo.root, repo.getcwd()) | ||||
#timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, | #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, | ||||
# False)))) | # False)))) | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(lambda: sum(map(len, repo.status(unknown=opts['unknown'])))) | timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown'])))) | ||||
fm.end() | fm.end() | ||||
@command('perfaddremove', formatteropts) | @command(b'perfaddremove', formatteropts) | ||||
def perfaddremove(ui, repo, **opts): | def perfaddremove(ui, repo, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
try: | try: | ||||
oldquiet = repo.ui.quiet | oldquiet = repo.ui.quiet | ||||
repo.ui.quiet = True | repo.ui.quiet = True | ||||
matcher = scmutil.match(repo[None]) | matcher = scmutil.match(repo[None]) | ||||
opts['dry_run'] = True | opts[b'dry_run'] = True | ||||
timer(lambda: scmutil.addremove(repo, matcher, "", opts)) | timer(lambda: scmutil.addremove(repo, matcher, b"", opts)) | ||||
finally: | finally: | ||||
repo.ui.quiet = oldquiet | repo.ui.quiet = oldquiet | ||||
fm.end() | fm.end() | ||||
def clearcaches(cl):
    """Drop revlog-level lookup caches across internal API changes.

    Attribute names passed to safehasattr must be native strings:
    getattr() rejects bytes names on Python 3, so the b'' forms would
    raise TypeError instead of probing the attribute.
    """
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        # before the clearcaches() API existed, reset the node lookup
        # cache directly
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
@command('perfheads', formatteropts) | @command(b'perfheads', formatteropts) | ||||
def perfheads(ui, repo, **opts): | def perfheads(ui, repo, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
cl = repo.changelog | cl = repo.changelog | ||||
def d(): | def d(): | ||||
len(cl.headrevs()) | len(cl.headrevs()) | ||||
clearcaches(cl) | clearcaches(cl) | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perftags', formatteropts) | @command(b'perftags', formatteropts) | ||||
def perftags(ui, repo, **opts): | def perftags(ui, repo, **opts): | ||||
import mercurial.changelog | import mercurial.changelog | ||||
import mercurial.manifest | import mercurial.manifest | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
svfs = getsvfs(repo) | svfs = getsvfs(repo) | ||||
repocleartagscache = repocleartagscachefunc(repo) | repocleartagscache = repocleartagscachefunc(repo) | ||||
def t(): | def t(): | ||||
repo.changelog = mercurial.changelog.changelog(svfs) | repo.changelog = mercurial.changelog.changelog(svfs) | ||||
repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo) | repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo) | ||||
repocleartagscache() | repocleartagscache() | ||||
return len(repo.tags()) | return len(repo.tags()) | ||||
timer(t) | timer(t) | ||||
fm.end() | fm.end() | ||||
@command('perfancestors', formatteropts) | @command(b'perfancestors', formatteropts) | ||||
def perfancestors(ui, repo, **opts): | def perfancestors(ui, repo, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
heads = repo.changelog.headrevs() | heads = repo.changelog.headrevs() | ||||
def d(): | def d(): | ||||
for a in repo.changelog.ancestors(heads): | for a in repo.changelog.ancestors(heads): | ||||
pass | pass | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfancestorset', formatteropts) | @command(b'perfancestorset', formatteropts) | ||||
def perfancestorset(ui, repo, revset, **opts): | def perfancestorset(ui, repo, revset, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
revs = repo.revs(revset) | revs = repo.revs(revset) | ||||
heads = repo.changelog.headrevs() | heads = repo.changelog.headrevs() | ||||
def d(): | def d(): | ||||
s = repo.changelog.ancestors(heads) | s = repo.changelog.ancestors(heads) | ||||
for rev in revs: | for rev in revs: | ||||
rev in s | rev in s | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfbookmarks', formatteropts) | @command(b'perfbookmarks', formatteropts) | ||||
def perfbookmarks(ui, repo, **opts): | def perfbookmarks(ui, repo, **opts): | ||||
"""benchmark parsing bookmarks from disk to memory""" | """benchmark parsing bookmarks from disk to memory""" | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
def d(): | def d(): | ||||
clearfilecache(repo, '_bookmarks') | clearfilecache(repo, b'_bookmarks') | ||||
repo._bookmarks | repo._bookmarks | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfbundleread', formatteropts, 'BUNDLE') | @command(b'perfbundleread', formatteropts, b'BUNDLE') | ||||
def perfbundleread(ui, repo, bundlepath, **opts): | def perfbundleread(ui, repo, bundlepath, **opts): | ||||
"""Benchmark reading of bundle files. | """Benchmark reading of bundle files. | ||||
This command is meant to isolate the I/O part of bundle reading as | This command is meant to isolate the I/O part of bundle reading as | ||||
much as possible. | much as possible. | ||||
""" | """ | ||||
from mercurial import ( | from mercurial import ( | ||||
bundle2, | bundle2, | ||||
exchange, | exchange, | ||||
streamclone, | streamclone, | ||||
) | ) | ||||
def makebench(fn): | def makebench(fn): | ||||
def run(): | def run(): | ||||
with open(bundlepath, 'rb') as fh: | with open(bundlepath, b'rb') as fh: | ||||
bundle = exchange.readbundle(ui, fh, bundlepath) | bundle = exchange.readbundle(ui, fh, bundlepath) | ||||
fn(bundle) | fn(bundle) | ||||
return run | return run | ||||
def makereadnbytes(size): | def makereadnbytes(size): | ||||
def run(): | def run(): | ||||
with open(bundlepath, 'rb') as fh: | with open(bundlepath, b'rb') as fh: | ||||
bundle = exchange.readbundle(ui, fh, bundlepath) | bundle = exchange.readbundle(ui, fh, bundlepath) | ||||
while bundle.read(size): | while bundle.read(size): | ||||
pass | pass | ||||
return run | return run | ||||
def makestdioread(size): | def makestdioread(size): | ||||
def run(): | def run(): | ||||
with open(bundlepath, 'rb') as fh: | with open(bundlepath, b'rb') as fh: | ||||
while fh.read(size): | while fh.read(size): | ||||
pass | pass | ||||
return run | return run | ||||
# bundle1 | # bundle1 | ||||
def deltaiter(bundle): | def deltaiter(bundle): | ||||
pass | pass | ||||
def seek(bundle): | def seek(bundle): | ||||
for part in bundle.iterparts(seekable=True): | for part in bundle.iterparts(seekable=True): | ||||
part.seek(0, os.SEEK_END) | part.seek(0, os.SEEK_END) | ||||
def makepartreadnbytes(size): | def makepartreadnbytes(size): | ||||
def run(): | def run(): | ||||
with open(bundlepath, 'rb') as fh: | with open(bundlepath, b'rb') as fh: | ||||
bundle = exchange.readbundle(ui, fh, bundlepath) | bundle = exchange.readbundle(ui, fh, bundlepath) | ||||
for part in bundle.iterparts(): | for part in bundle.iterparts(): | ||||
while part.read(size): | while part.read(size): | ||||
pass | pass | ||||
return run | return run | ||||
benches = [ | benches = [ | ||||
(makestdioread(8192), 'read(8k)'), | (makestdioread(8192), b'read(8k)'), | ||||
(makestdioread(16384), 'read(16k)'), | (makestdioread(16384), b'read(16k)'), | ||||
(makestdioread(32768), 'read(32k)'), | (makestdioread(32768), b'read(32k)'), | ||||
(makestdioread(131072), 'read(128k)'), | (makestdioread(131072), b'read(128k)'), | ||||
] | ] | ||||
with open(bundlepath, 'rb') as fh: | with open(bundlepath, b'rb') as fh: | ||||
bundle = exchange.readbundle(ui, fh, bundlepath) | bundle = exchange.readbundle(ui, fh, bundlepath) | ||||
if isinstance(bundle, changegroup.cg1unpacker): | if isinstance(bundle, changegroup.cg1unpacker): | ||||
benches.extend([ | benches.extend([ | ||||
(makebench(deltaiter), 'cg1 deltaiter()'), | (makebench(deltaiter), b'cg1 deltaiter()'), | ||||
(makebench(iterchunks), 'cg1 getchunks()'), | (makebench(iterchunks), b'cg1 getchunks()'), | ||||
(makereadnbytes(8192), 'cg1 read(8k)'), | (makereadnbytes(8192), b'cg1 read(8k)'), | ||||
(makereadnbytes(16384), 'cg1 read(16k)'), | (makereadnbytes(16384), b'cg1 read(16k)'), | ||||
(makereadnbytes(32768), 'cg1 read(32k)'), | (makereadnbytes(32768), b'cg1 read(32k)'), | ||||
(makereadnbytes(131072), 'cg1 read(128k)'), | (makereadnbytes(131072), b'cg1 read(128k)'), | ||||
]) | ]) | ||||
elif isinstance(bundle, bundle2.unbundle20): | elif isinstance(bundle, bundle2.unbundle20): | ||||
benches.extend([ | benches.extend([ | ||||
(makebench(forwardchunks), 'bundle2 forwardchunks()'), | (makebench(forwardchunks), b'bundle2 forwardchunks()'), | ||||
(makebench(iterparts), 'bundle2 iterparts()'), | (makebench(iterparts), b'bundle2 iterparts()'), | ||||
(makebench(iterpartsseekable), 'bundle2 iterparts() seekable'), | (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'), | ||||
(makebench(seek), 'bundle2 part seek()'), | (makebench(seek), b'bundle2 part seek()'), | ||||
(makepartreadnbytes(8192), 'bundle2 part read(8k)'), | (makepartreadnbytes(8192), b'bundle2 part read(8k)'), | ||||
(makepartreadnbytes(16384), 'bundle2 part read(16k)'), | (makepartreadnbytes(16384), b'bundle2 part read(16k)'), | ||||
(makepartreadnbytes(32768), 'bundle2 part read(32k)'), | (makepartreadnbytes(32768), b'bundle2 part read(32k)'), | ||||
(makepartreadnbytes(131072), 'bundle2 part read(128k)'), | (makepartreadnbytes(131072), b'bundle2 part read(128k)'), | ||||
]) | ]) | ||||
elif isinstance(bundle, streamclone.streamcloneapplier): | elif isinstance(bundle, streamclone.streamcloneapplier): | ||||
raise error.Abort('stream clone bundles not supported') | raise error.Abort(b'stream clone bundles not supported') | ||||
else: | else: | ||||
raise error.Abort('unhandled bundle type: %s' % type(bundle)) | raise error.Abort(b'unhandled bundle type: %s' % type(bundle)) | ||||
for fn, title in benches: | for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | timer(fn, title=title) | ||||
fm.end() | fm.end() | ||||
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    # Resolve the requested revset (default: everything) to nodes up front,
    # so only changegroup generation itself is timed.
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)
    def d():
        # Drain the chunk generator; the chunk contents are discarded.
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass
    timer, fm = gettimer(ui, opts)
    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)
    fm.end()
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """Benchmark the dirstate's directory-membership query."""
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the dirstate once so loading is not part of the measurement.
    b'a' in dirstate
    def querydirs():
        dirstate.hasdir(b'a')
        # Drop the cached directory map so every run rebuilds it.
        del dirstate._map._dirs
    timer(querydirs)
    fm.end()
@command('perfdirstate', formatteropts) | @command(b'perfdirstate', formatteropts) | ||||
def perfdirstate(ui, repo, **opts): | def perfdirstate(ui, repo, **opts): | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
"a" in repo.dirstate | b"a" in repo.dirstate | ||||
def d(): | def d(): | ||||
repo.dirstate.invalidate() | repo.dirstate.invalidate() | ||||
"a" in repo.dirstate | b"a" in repo.dirstate | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """Benchmark rebuilding the dirstate's directory map."""
    timer, fm = gettimer(ui, opts)
    # Prime the dirstate so loading it is excluded from the timing.
    b"a" in repo.dirstate
    def rebuild():
        repo.dirstate.hasdir(b"a")
        # Invalidate the directory map so the next run recomputes it.
        del repo.dirstate._map._dirs
    timer(rebuild)
    fm.end()
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """Benchmark building the dirstate's case-folding map for files."""
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the dirstate so loading it is excluded from the timing.
    b'a' in dirstate
    def rebuild():
        dirstate._map.filefoldmap.get(b'a')
        # Drop the cached map so every run recomputes it.
        del dirstate._map.filefoldmap
    timer(rebuild)
    fm.end()
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """Benchmark building the dirstate's case-folding map for directories."""
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the dirstate so loading it is excluded from the timing.
    b'a' in dirstate
    def rebuild():
        dirstate._map.dirfoldmap.get(b'a')
        # Drop both cached maps so every run starts from scratch.
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(rebuild)
    fm.end()
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """Benchmark serializing the dirstate back to disk."""
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # Force the dirstate to be loaded before timing.
    b"a" in ds
    def flush():
        # Mark dirty so write() actually serializes instead of no-op'ing.
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(flush)
    fm.end()
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """Benchmark the merge-action calculation between the working copy and
    REV (nothing is applied to the working directory)."""
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """Benchmark tracing file copies between two revisions."""
    timer, fm = gettimer(ui, opts)
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)
    def trace():
        copies.pathcopies(srcctx, dstctx)
    timer(trace)
    fm.end()
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    # ``**opts`` keys are native str on Python 3, so the b'full' lookup
    # below would always miss; normalize to bytes keys first, matching the
    # pycompat.byteskwargs() pattern used by perfbdiff.
    opts = pycompat.byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # Also measure re-reading the phase roots from disk.
            # NOTE(review): filecache entries are keyed by attribute name;
            # confirm a bytes key works here on Python 3.
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    # ``**opts`` keys are native str on Python 3, so the b'branch' and
    # b'rev' lookups below would always miss; normalize to bytes keys
    # first, matching the pycompat.byteskwargs() pattern used by perfbdiff.
    opts = pycompat.byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # Count remote phase roots that are known locally and non-public.
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
], b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # Interpret REV as a changeset and use its manifest node.
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # Full hex manifest node.
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # The attribute name must stay a native str: getattr()
                # (used by util.safehasattr) raises TypeError on Python 3
                # when handed a bytes attribute name.
                if util.safehasattr(repo.manifestlog, 'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """Benchmark reading a single changeset from the changelog."""
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()
    def readone():
        repo.changelog.read(node)
    timer(readone)
    fm.end()
@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    """Benchmark opening the changelog index and resolving the tip node."""
    import mercurial.revlog
    timer, fm = gettimer(ui, opts)
    # Disable the lazy index parser in old hg versions.
    mercurial.revlog._prereadsize = 2**24
    tipnode = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def openandresolve():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(tipnode)
    timer(openandresolve)
    fm.end()
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """Benchmark hg startup time by spawning ``hg version`` in a subshell."""
    timer, fm = gettimer(ui, opts)
    cmd = sys.argv[0]
    def d():
        # os.name, os.environ and os.system() all traffic in native str on
        # Python 3, so these values must NOT be byteified: os.name can
        # never equal b'nt', a bytes os.environ key raises TypeError, and
        # b"..." % cmd (cmd is a native str) also raises TypeError.
        if os.name != 'nt':
            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
        else:
            os.environ['HGRCPATH'] = ' '
            os.system("%s version -q > NUL" % cmd)
    timer(d)
    fm.end()
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """Benchmark looking up the parents of the first N changelog nodes."""
    timer, fm = gettimer(ui, opts)
    # Number of commits iterated over.
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in xrange(count)]
    def lookupparents():
        for node in nodes:
            repo.changelog.parents(node)
    timer(lookupparents)
    fm.end()
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """Benchmark computing the file list of one changeset."""
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    def countfiles():
        len(repo[rev].files())
    timer(countfiles)
    fm.end()
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """Benchmark reading the raw file list of a changelog entry."""
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def readfiles():
        # Field 3 of a parsed changelog entry is the files list.
        len(cl.read(rev)[3])
    timer(readfiles)
    fm.end()
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """Benchmark resolving a revision identifier to a node."""
    timer, fm = gettimer(ui, opts)
    def resolve():
        len(repo.lookup(rev))
    timer(resolve)
    fm.end()
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """Benchmark replaying a stream of random edits through a linelog."""
    from mercurial import linelog

    # ``**opts`` keys are native str on Python 3, so opts[b'edits'] would
    # raise KeyError; normalize to bytes keys first, matching the
    # pycompat.byteskwargs() pattern used by perfbdiff.
    opts = pycompat.byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # Pre-generate the edit stream so only replacelines() is timed.
    for rev in xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """Benchmark parsing and evaluating a revision range."""
    timer, fm = gettimer(ui, opts)
    def evalrange():
        len(scmutil.revrange(repo, specs))
    timer(evalrange)
    fm.end()
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """Benchmark mapping a node back to its revision number."""
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    # Disable the lazy index parser in old hg versions.
    mercurial.revlog._prereadsize = 2**24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def lookup():
        cl.rev(node)
        # Drop the lookup caches so every run starts cold.
        clearcaches(cl)
    timer(lookup)
    fm.end()
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """Benchmark a plain ``hg log`` run (output is buffered and discarded)."""
    if rev is None:
        rev = []
    # ``**opts`` keys are native str on Python 3, so the b'rename' lookup
    # below would always miss; normalize to bytes keys first, matching the
    # pycompat.byteskwargs() pattern used by perfbdiff.
    opts = pycompat.byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces the changelog entry to be read, not just the
            # index.
            repo[rev].branch()
    timer(walkback)
    fm.end()
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")
    # ``**opts`` keys are native str on Python 3, so the b'rev' lookup
    # below would always miss; normalize to bytes keys first, matching the
    # pycompat.byteskwargs() pattern used by perfbdiff.
    opts = pycompat.byteskwargs(opts)

    nullui = ui.copy()
    # The open() mode must stay a native str: Python 3 rejects a bytes
    # mode argument with a TypeError.
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark constructing a case-collision auditor."""
    timer, fm = gettimer(ui, opts)
    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(makeauditor)
    fm.end()
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache from disk."""
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def load():
        store.fncache._load()
    timer(load)
    fm.end()
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark serializing the fncache inside a transaction."""
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def write():
        # Mark dirty so write() actually serializes instead of no-op'ing.
        store.fncache._dirty = True
        store.fncache.write(tr)
    timer(write)
    tr.close()
    lock.release()
    fm.end()
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path recorded in the fncache."""
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)
    timer(encodeall)
pair = q.get() | pair = q.get() | ||||
q.task_done() # for the None one | q.task_done() # for the None one | ||||
with ready: | with ready: | ||||
ready.wait() | ready.wait() | ||||
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for manifest node ``mnode``.

    Works with both modern repos, where storage is reached through
    ``manifestlog.getstorage(b'')``, and older ones exposing ``_revlog``.
    """
    ml = repo.manifestlog

    # The attribute name must stay a native str: getattr() (used by
    # util.safehasattr) raises TypeError on Python 3 when handed a bytes
    # attribute name.
    if util.safehasattr(ml, 'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
@command('perfbdiff', revlogopts + formatteropts + [ | @command(b'perfbdiff', revlogopts + formatteropts + [ | ||||
('', 'count', 1, 'number of revisions to test (when using --startrev)'), | (b'', b'count', 1, b'number of revisions to test (when using --startrev)'), | ||||
('', 'alldata', False, 'test bdiffs for all associated revisions'), | (b'', b'alldata', False, b'test bdiffs for all associated revisions'), | ||||
('', 'threads', 0, 'number of thread to use (disable with 0)'), | (b'', b'threads', 0, b'number of thread to use (disable with 0)'), | ||||
('', 'blocks', False, 'test computing diffs into blocks'), | (b'', b'blocks', False, b'test computing diffs into blocks'), | ||||
('', 'xdiff', False, 'use xdiff algorithm'), | (b'', b'xdiff', False, b'use xdiff algorithm'), | ||||
], | ], | ||||
'-c|-m|FILE REV') | b'-c|-m|FILE REV') | ||||
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts): | def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts): | ||||
"""benchmark a bdiff between revisions | """benchmark a bdiff between revisions | ||||
By default, benchmark a bdiff between its delta parent and itself. | By default, benchmark a bdiff between its delta parent and itself. | ||||
With ``--count``, benchmark bdiffs between delta parents and self for N | With ``--count``, benchmark bdiffs between delta parents and self for N | ||||
revisions starting at the specified revision. | revisions starting at the specified revision. | ||||
With ``--alldata``, assume the requested revision is a changeset and | With ``--alldata``, assume the requested revision is a changeset and | ||||
measure bdiffs for all changes related to that changeset (manifest | measure bdiffs for all changes related to that changeset (manifest | ||||
and filelogs). | and filelogs). | ||||
""" | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
if opts['xdiff'] and not opts['blocks']: | if opts[b'xdiff'] and not opts[b'blocks']: | ||||
raise error.CommandError('perfbdiff', '--xdiff requires --blocks') | raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks') | ||||
if opts['alldata']: | if opts[b'alldata']: | ||||
opts['changelog'] = True | opts[b'changelog'] = True | ||||
if opts.get('changelog') or opts.get('manifest'): | if opts.get(b'changelog') or opts.get(b'manifest'): | ||||
file_, rev = None, file_ | file_, rev = None, file_ | ||||
elif rev is None: | elif rev is None: | ||||
raise error.CommandError('perfbdiff', 'invalid arguments') | raise error.CommandError(b'perfbdiff', b'invalid arguments') | ||||
blocks = opts['blocks'] | blocks = opts[b'blocks'] | ||||
xdiff = opts['xdiff'] | xdiff = opts[b'xdiff'] | ||||
textpairs = [] | textpairs = [] | ||||
r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts) | r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts) | ||||
startrev = r.rev(r.lookup(rev)) | startrev = r.rev(r.lookup(rev)) | ||||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
if opts['alldata']: | if opts[b'alldata']: | ||||
# Load revisions associated with changeset. | # Load revisions associated with changeset. | ||||
ctx = repo[rev] | ctx = repo[rev] | ||||
mtext = _manifestrevision(repo, ctx.manifestnode()) | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||||
for pctx in ctx.parents(): | for pctx in ctx.parents(): | ||||
pman = _manifestrevision(repo, pctx.manifestnode()) | pman = _manifestrevision(repo, pctx.manifestnode()) | ||||
textpairs.append((pman, mtext)) | textpairs.append((pman, mtext)) | ||||
# Load filelog revisions by iterating manifest delta. | # Load filelog revisions by iterating manifest delta. | ||||
if withthreads: | if withthreads: | ||||
done.set() | done.set() | ||||
for i in xrange(threads): | for i in xrange(threads): | ||||
q.put(None) | q.put(None) | ||||
with ready: | with ready: | ||||
ready.notify_all() | ready.notify_all() | ||||
@command('perfunidiff', revlogopts + formatteropts + [ | @command(b'perfunidiff', revlogopts + formatteropts + [ | ||||
('', 'count', 1, 'number of revisions to test (when using --startrev)'), | (b'', b'count', 1, b'number of revisions to test (when using --startrev)'), | ||||
('', 'alldata', False, 'test unidiffs for all associated revisions'), | (b'', b'alldata', False, b'test unidiffs for all associated revisions'), | ||||
], '-c|-m|FILE REV') | ], b'-c|-m|FILE REV') | ||||
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts): | def perfunidiff(ui, repo, file_, rev=None, count=None, **opts): | ||||
"""benchmark a unified diff between revisions | """benchmark a unified diff between revisions | ||||
This doesn't include any copy tracing - it's just a unified diff | This doesn't include any copy tracing - it's just a unified diff | ||||
of the texts. | of the texts. | ||||
By default, benchmark a diff between its delta parent and itself. | By default, benchmark a diff between its delta parent and itself. | ||||
With ``--count``, benchmark diffs between delta parents and self for N | With ``--count``, benchmark diffs between delta parents and self for N | ||||
revisions starting at the specified revision. | revisions starting at the specified revision. | ||||
With ``--alldata``, assume the requested revision is a changeset and | With ``--alldata``, assume the requested revision is a changeset and | ||||
measure diffs for all changes related to that changeset (manifest | measure diffs for all changes related to that changeset (manifest | ||||
and filelogs). | and filelogs). | ||||
""" | """ | ||||
if opts['alldata']: | if opts[b'alldata']: | ||||
opts['changelog'] = True | opts[b'changelog'] = True | ||||
if opts.get('changelog') or opts.get('manifest'): | if opts.get(b'changelog') or opts.get(b'manifest'): | ||||
file_, rev = None, file_ | file_, rev = None, file_ | ||||
elif rev is None: | elif rev is None: | ||||
raise error.CommandError('perfunidiff', 'invalid arguments') | raise error.CommandError(b'perfunidiff', b'invalid arguments') | ||||
textpairs = [] | textpairs = [] | ||||
r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts) | r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts) | ||||
startrev = r.rev(r.lookup(rev)) | startrev = r.rev(r.lookup(rev)) | ||||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
if opts['alldata']: | if opts[b'alldata']: | ||||
# Load revisions associated with changeset. | # Load revisions associated with changeset. | ||||
ctx = repo[rev] | ctx = repo[rev] | ||||
mtext = _manifestrevision(repo, ctx.manifestnode()) | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||||
for pctx in ctx.parents(): | for pctx in ctx.parents(): | ||||
pman = _manifestrevision(repo, pctx.manifestnode()) | pman = _manifestrevision(repo, pctx.manifestnode()) | ||||
textpairs.append((pman, mtext)) | textpairs.append((pman, mtext)) | ||||
# Load filelog revisions by iterating manifest delta. | # Load filelog revisions by iterating manifest delta. | ||||
man = ctx.manifest() | man = ctx.manifest() | ||||
pman = ctx.p1().manifest() | pman = ctx.p1().manifest() | ||||
for filename, change in pman.diff(man).items(): | for filename, change in pman.diff(man).items(): | ||||
fctx = repo.file(filename) | fctx = repo.file(filename) | ||||
f1 = fctx.revision(change[0][0] or -1) | f1 = fctx.revision(change[0][0] or -1) | ||||
f2 = fctx.revision(change[1][0] or -1) | f2 = fctx.revision(change[1][0] or -1) | ||||
textpairs.append((f1, f2)) | textpairs.append((f1, f2)) | ||||
else: | else: | ||||
dp = r.deltaparent(rev) | dp = r.deltaparent(rev) | ||||
textpairs.append((r.revision(dp), r.revision(rev))) | textpairs.append((r.revision(dp), r.revision(rev))) | ||||
def d(): | def d(): | ||||
for left, right in textpairs: | for left, right in textpairs: | ||||
# The date strings don't matter, so we pass empty strings. | # The date strings don't matter, so we pass empty strings. | ||||
headerlines, hunks = mdiff.unidiff( | headerlines, hunks = mdiff.unidiff( | ||||
left, '', right, '', 'left', 'right', binary=False) | left, b'', right, b'', b'left', b'right', binary=False) | ||||
# consume iterators in roughly the way patch.py does | # consume iterators in roughly the way patch.py does | ||||
b'\n'.join(headerlines) | b'\n'.join(headerlines) | ||||
b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) | b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfdiffwd', formatteropts) | @command(b'perfdiffwd', formatteropts) | ||||
def perfdiffwd(ui, repo, **opts): | def perfdiffwd(ui, repo, **opts): | ||||
"""Profile diff of working directory changes""" | """Profile diff of working directory changes""" | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
options = { | options = { | ||||
'w': 'ignore_all_space', | b'w': b'ignore_all_space', | ||||
'b': 'ignore_space_change', | b'b': b'ignore_space_change', | ||||
'B': 'ignore_blank_lines', | b'B': b'ignore_blank_lines', | ||||
} | } | ||||
for diffopt in ('', 'w', 'b', 'B', 'wB'): | for diffopt in (b'', b'w', b'b', b'B', b'wB'): | ||||
opts = dict((options[c], '1') for c in diffopt) | opts = dict((options[c], b'1') for c in diffopt) | ||||
def d(): | def d(): | ||||
ui.pushbuffer() | ui.pushbuffer() | ||||
commands.diff(ui, repo, **opts) | commands.diff(ui, repo, **opts) | ||||
ui.popbuffer() | ui.popbuffer() | ||||
title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none') | title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none') | ||||
timer(d, title) | timer(d, title) | ||||
fm.end() | fm.end() | ||||
@command('perfrevlogindex', revlogopts + formatteropts, | @command(b'perfrevlogindex', revlogopts + formatteropts, | ||||
'-c|-m|FILE') | b'-c|-m|FILE') | ||||
def perfrevlogindex(ui, repo, file_=None, **opts): | def perfrevlogindex(ui, repo, file_=None, **opts): | ||||
"""Benchmark operations against a revlog index. | """Benchmark operations against a revlog index. | ||||
This tests constructing a revlog instance, reading index data, | This tests constructing a revlog instance, reading index data, | ||||
parsing index data, and performing various operations related to | parsing index data, and performing various operations related to | ||||
index data. | index data. | ||||
""" | """ | ||||
rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts) | rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts) | ||||
opener = getattr(rl, 'opener') # trick linter | opener = getattr(rl, 'opener') # trick linter | ||||
indexfile = rl.indexfile | indexfile = rl.indexfile | ||||
data = opener.read(indexfile) | data = opener.read(indexfile) | ||||
header = struct.unpack('>I', data[0:4])[0] | header = struct.unpack(b'>I', data[0:4])[0] | ||||
version = header & 0xFFFF | version = header & 0xFFFF | ||||
if version == 1: | if version == 1: | ||||
revlogio = revlog.revlogio() | revlogio = revlog.revlogio() | ||||
inline = header & (1 << 16) | inline = header & (1 << 16) | ||||
else: | else: | ||||
raise error.Abort(('unsupported revlog version: %d') % version) | raise error.Abort((b'unsupported revlog version: %d') % version) | ||||
rllen = len(rl) | rllen = len(rl) | ||||
node0 = rl.node(0) | node0 = rl.node(0) | ||||
node25 = rl.node(rllen // 4) | node25 = rl.node(rllen // 4) | ||||
node50 = rl.node(rllen // 2) | node50 = rl.node(rllen // 2) | ||||
node75 = rl.node(rllen // 4 * 3) | node75 = rl.node(rllen // 4 * 3) | ||||
node100 = rl.node(rllen - 1) | node100 = rl.node(rllen - 1) | ||||
for i in range(count): | for i in range(count): | ||||
for node in nodes: | for node in nodes: | ||||
try: | try: | ||||
nodemap[node] | nodemap[node] | ||||
except error.RevlogError: | except error.RevlogError: | ||||
pass | pass | ||||
benches = [ | benches = [ | ||||
(constructor, 'revlog constructor'), | (constructor, b'revlog constructor'), | ||||
(read, 'read'), | (read, b'read'), | ||||
(parseindex, 'create index object'), | (parseindex, b'create index object'), | ||||
(lambda: getentry(0), 'retrieve index entry for rev 0'), | (lambda: getentry(0), b'retrieve index entry for rev 0'), | ||||
(lambda: resolvenode('a' * 20), 'look up missing node'), | (lambda: resolvenode(b'a' * 20), b'look up missing node'), | ||||
(lambda: resolvenode(node0), 'look up node at rev 0'), | (lambda: resolvenode(node0), b'look up node at rev 0'), | ||||
(lambda: resolvenode(node25), 'look up node at 1/4 len'), | (lambda: resolvenode(node25), b'look up node at 1/4 len'), | ||||
(lambda: resolvenode(node50), 'look up node at 1/2 len'), | (lambda: resolvenode(node50), b'look up node at 1/2 len'), | ||||
(lambda: resolvenode(node75), 'look up node at 3/4 len'), | (lambda: resolvenode(node75), b'look up node at 3/4 len'), | ||||
(lambda: resolvenode(node100), 'look up node at tip'), | (lambda: resolvenode(node100), b'look up node at tip'), | ||||
# 2x variation is to measure caching impact. | # 2x variation is to measure caching impact. | ||||
(lambda: resolvenodes(allnodes), | (lambda: resolvenodes(allnodes), | ||||
'look up all nodes (forward)'), | b'look up all nodes (forward)'), | ||||
(lambda: resolvenodes(allnodes, 2), | (lambda: resolvenodes(allnodes, 2), | ||||
'look up all nodes 2x (forward)'), | b'look up all nodes 2x (forward)'), | ||||
(lambda: resolvenodes(allnodesrev), | (lambda: resolvenodes(allnodesrev), | ||||
'look up all nodes (reverse)'), | b'look up all nodes (reverse)'), | ||||
(lambda: resolvenodes(allnodesrev, 2), | (lambda: resolvenodes(allnodesrev, 2), | ||||
'look up all nodes 2x (reverse)'), | b'look up all nodes 2x (reverse)'), | ||||
(lambda: getentries(allrevs), | (lambda: getentries(allrevs), | ||||
'retrieve all index entries (forward)'), | b'retrieve all index entries (forward)'), | ||||
(lambda: getentries(allrevs, 2), | (lambda: getentries(allrevs, 2), | ||||
'retrieve all index entries 2x (forward)'), | b'retrieve all index entries 2x (forward)'), | ||||
(lambda: getentries(allrevsrev), | (lambda: getentries(allrevsrev), | ||||
'retrieve all index entries (reverse)'), | b'retrieve all index entries (reverse)'), | ||||
(lambda: getentries(allrevsrev, 2), | (lambda: getentries(allrevsrev, 2), | ||||
'retrieve all index entries 2x (reverse)'), | b'retrieve all index entries 2x (reverse)'), | ||||
] | ] | ||||
for fn, title in benches: | for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | timer(fn, title=title) | ||||
fm.end() | fm.end() | ||||
@command('perfrevlogrevisions', revlogopts + formatteropts + | @command(b'perfrevlogrevisions', revlogopts + formatteropts + | ||||
[('d', 'dist', 100, 'distance between the revisions'), | [(b'd', b'dist', 100, b'distance between the revisions'), | ||||
('s', 'startrev', 0, 'revision to start reading at'), | (b's', b'startrev', 0, b'revision to start reading at'), | ||||
('', 'reverse', False, 'read in reverse')], | (b'', b'reverse', False, b'read in reverse')], | ||||
'-c|-m|FILE') | b'-c|-m|FILE') | ||||
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False, | def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False, | ||||
**opts): | **opts): | ||||
"""Benchmark reading a series of revisions from a revlog. | """Benchmark reading a series of revisions from a revlog. | ||||
By default, we read every ``-d/--dist`` revision from 0 to tip of | By default, we read every ``-d/--dist`` revision from 0 to tip of | ||||
the specified revlog. | the specified revlog. | ||||
The start revision can be defined via ``-s/--startrev``. | The start revision can be defined via ``-s/--startrev``. | ||||
""" | """ | ||||
rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts) | rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts) | ||||
rllen = getlen(ui)(rl) | rllen = getlen(ui)(rl) | ||||
def d(): | def d(): | ||||
rl.clearcaches() | rl.clearcaches() | ||||
beginrev = startrev | beginrev = startrev | ||||
endrev = rllen | endrev = rllen | ||||
dist = opts['dist'] | dist = opts[b'dist'] | ||||
if reverse: | if reverse: | ||||
beginrev, endrev = endrev, beginrev | beginrev, endrev = endrev, beginrev | ||||
dist = -1 * dist | dist = -1 * dist | ||||
for x in xrange(beginrev, endrev, dist): | for x in xrange(beginrev, endrev, dist): | ||||
# Old revisions don't support passing int. | # Old revisions don't support passing int. | ||||
n = rl.node(x) | n = rl.node(x) | ||||
rl.revision(n) | rl.revision(n) | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfrevlogchunks', revlogopts + formatteropts + | @command(b'perfrevlogchunks', revlogopts + formatteropts + | ||||
[('e', 'engines', '', 'compression engines to use'), | [(b'e', b'engines', b'', b'compression engines to use'), | ||||
('s', 'startrev', 0, 'revision to start at')], | (b's', b'startrev', 0, b'revision to start at')], | ||||
'-c|-m|FILE') | b'-c|-m|FILE') | ||||
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts): | def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts): | ||||
"""Benchmark operations on revlog chunks. | """Benchmark operations on revlog chunks. | ||||
Logically, each revlog is a collection of fulltext revisions. However, | Logically, each revlog is a collection of fulltext revisions. However, | ||||
stored within each revlog are "chunks" of possibly compressed data. This | stored within each revlog are "chunks" of possibly compressed data. This | ||||
data needs to be read and decompressed or compressed and written. | data needs to be read and decompressed or compressed and written. | ||||
This command measures the time it takes to read+decompress and recompress | This command measures the time it takes to read+decompress and recompress | ||||
chunks in a revlog. It effectively isolates I/O and compression performance. | chunks in a revlog. It effectively isolates I/O and compression performance. | ||||
For measurements of higher-level operations like resolving revisions, | For measurements of higher-level operations like resolving revisions, | ||||
see ``perfrevlogrevisions`` and ``perfrevlogrevision``. | see ``perfrevlogrevisions`` and ``perfrevlogrevision``. | ||||
""" | """ | ||||
rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts) | rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts) | ||||
# _chunkraw was renamed to _getsegmentforrevs. | # _chunkraw was renamed to _getsegmentforrevs. | ||||
try: | try: | ||||
segmentforrevs = rl._getsegmentforrevs | segmentforrevs = rl._getsegmentforrevs | ||||
except AttributeError: | except AttributeError: | ||||
segmentforrevs = rl._chunkraw | segmentforrevs = rl._chunkraw | ||||
# Verify engines argument. | # Verify engines argument. | ||||
if engines: | if engines: | ||||
engines = set(e.strip() for e in engines.split(',')) | engines = set(e.strip() for e in engines.split(b',')) | ||||
for engine in engines: | for engine in engines: | ||||
try: | try: | ||||
util.compressionengines[engine] | util.compressionengines[engine] | ||||
except KeyError: | except KeyError: | ||||
raise error.Abort('unknown compression engine: %s' % engine) | raise error.Abort(b'unknown compression engine: %s' % engine) | ||||
else: | else: | ||||
engines = [] | engines = [] | ||||
for e in util.compengines: | for e in util.compengines: | ||||
engine = util.compengines[e] | engine = util.compengines[e] | ||||
try: | try: | ||||
if engine.available(): | if engine.available(): | ||||
engine.revlogcompressor().compress('dummy') | engine.revlogcompressor().compress(b'dummy') | ||||
engines.append(e) | engines.append(e) | ||||
except NotImplementedError: | except NotImplementedError: | ||||
pass | pass | ||||
revs = list(rl.revs(startrev, len(rl) - 1)) | revs = list(rl.revs(startrev, len(rl) - 1)) | ||||
def rlfh(rl): | def rlfh(rl): | ||||
if rl._inline: | if rl._inline: | ||||
oldcompressor = rl._compressor | oldcompressor = rl._compressor | ||||
rl._compressor = compressor | rl._compressor = compressor | ||||
for chunk in chunks[0]: | for chunk in chunks[0]: | ||||
rl.compress(chunk) | rl.compress(chunk) | ||||
finally: | finally: | ||||
rl._compressor = oldcompressor | rl._compressor = oldcompressor | ||||
benches = [ | benches = [ | ||||
(lambda: doread(), 'read'), | (lambda: doread(), b'read'), | ||||
(lambda: doreadcachedfh(), 'read w/ reused fd'), | (lambda: doreadcachedfh(), b'read w/ reused fd'), | ||||
(lambda: doreadbatch(), 'read batch'), | (lambda: doreadbatch(), b'read batch'), | ||||
(lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'), | (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'), | ||||
(lambda: dochunk(), 'chunk'), | (lambda: dochunk(), b'chunk'), | ||||
(lambda: dochunkbatch(), 'chunk batch'), | (lambda: dochunkbatch(), b'chunk batch'), | ||||
] | ] | ||||
for engine in sorted(engines): | for engine in sorted(engines): | ||||
compressor = util.compengines[engine].revlogcompressor() | compressor = util.compengines[engine].revlogcompressor() | ||||
benches.append((functools.partial(docompress, compressor), | benches.append((functools.partial(docompress, compressor), | ||||
'compress w/ %s' % engine)) | b'compress w/ %s' % engine)) | ||||
for fn, title in benches: | for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | timer(fn, title=title) | ||||
fm.end() | fm.end() | ||||
@command('perfrevlogrevision', revlogopts + formatteropts + | @command(b'perfrevlogrevision', revlogopts + formatteropts + | ||||
[('', 'cache', False, 'use caches instead of clearing')], | [(b'', b'cache', False, b'use caches instead of clearing')], | ||||
'-c|-m|FILE REV') | b'-c|-m|FILE REV') | ||||
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts): | def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts): | ||||
"""Benchmark obtaining a revlog revision. | """Benchmark obtaining a revlog revision. | ||||
Obtaining a revlog revision consists of roughly the following steps: | Obtaining a revlog revision consists of roughly the following steps: | ||||
1. Compute the delta chain | 1. Compute the delta chain | ||||
2. Obtain the raw chunks for that delta chain | 2. Obtain the raw chunks for that delta chain | ||||
3. Decompress each raw chunk | 3. Decompress each raw chunk | ||||
4. Apply binary patches to obtain fulltext | 4. Apply binary patches to obtain fulltext | ||||
5. Verify hash of fulltext | 5. Verify hash of fulltext | ||||
This command measures the time spent in each of these phases. | This command measures the time spent in each of these phases. | ||||
""" | """ | ||||
if opts.get('changelog') or opts.get('manifest'): | if opts.get(b'changelog') or opts.get(b'manifest'): | ||||
file_, rev = None, file_ | file_, rev = None, file_ | ||||
elif rev is None: | elif rev is None: | ||||
raise error.CommandError('perfrevlogrevision', 'invalid arguments') | raise error.CommandError(b'perfrevlogrevision', b'invalid arguments') | ||||
r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts) | r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts) | ||||
# _chunkraw was renamed to _getsegmentforrevs. | # _chunkraw was renamed to _getsegmentforrevs. | ||||
try: | try: | ||||
segmentforrevs = r._getsegmentforrevs | segmentforrevs = r._getsegmentforrevs | ||||
except AttributeError: | except AttributeError: | ||||
segmentforrevs = r._chunkraw | segmentforrevs = r._chunkraw | ||||
node = r.lookup(rev) | node = r.lookup(rev) | ||||
data = segmentforrevs(chain[0], chain[-1])[1] | data = segmentforrevs(chain[0], chain[-1])[1] | ||||
rawchunks = getrawchunks(data, chain) | rawchunks = getrawchunks(data, chain) | ||||
bins = r._chunks(chain) | bins = r._chunks(chain) | ||||
text = str(bins[0]) | text = str(bins[0]) | ||||
bins = bins[1:] | bins = bins[1:] | ||||
text = mdiff.patches(text, bins) | text = mdiff.patches(text, bins) | ||||
benches = [ | benches = [ | ||||
(lambda: dorevision(), 'full'), | (lambda: dorevision(), b'full'), | ||||
(lambda: dodeltachain(rev), 'deltachain'), | (lambda: dodeltachain(rev), b'deltachain'), | ||||
(lambda: doread(chain), 'read'), | (lambda: doread(chain), b'read'), | ||||
(lambda: dorawchunks(data, chain), 'rawchunks'), | (lambda: dorawchunks(data, chain), b'rawchunks'), | ||||
(lambda: dodecompress(rawchunks), 'decompress'), | (lambda: dodecompress(rawchunks), b'decompress'), | ||||
(lambda: dopatch(text, bins), 'patch'), | (lambda: dopatch(text, bins), b'patch'), | ||||
(lambda: dohash(text), 'hash'), | (lambda: dohash(text), b'hash'), | ||||
] | ] | ||||
for fn, title in benches: | for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | timer(fn, title=title) | ||||
fm.end() | fm.end() | ||||
@command('perfrevset', | @command(b'perfrevset', | ||||
[('C', 'clear', False, 'clear volatile cache between each call.'), | [(b'C', b'clear', False, b'clear volatile cache between each call.'), | ||||
('', 'contexts', False, 'obtain changectx for each revision')] | (b'', b'contexts', False, b'obtain changectx for each revision')] | ||||
+ formatteropts, "REVSET") | + formatteropts, b"REVSET") | ||||
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts): | def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts): | ||||
"""benchmark the execution time of a revset | """benchmark the execution time of a revset | ||||
Use the --clean option if need to evaluate the impact of build volatile | Use the --clean option if need to evaluate the impact of build volatile | ||||
revisions set cache on the revset execution. Volatile cache hold filtered | revisions set cache on the revset execution. Volatile cache hold filtered | ||||
and obsolete related cache.""" | and obsolete related cache.""" | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
def d(): | def d(): | ||||
if clear: | if clear: | ||||
repo.invalidatevolatilesets() | repo.invalidatevolatilesets() | ||||
if contexts: | if contexts: | ||||
for ctx in repo.set(expr): pass | for ctx in repo.set(expr): pass | ||||
else: | else: | ||||
for r in repo.revs(expr): pass | for r in repo.revs(expr): pass | ||||
timer(d) | timer(d) | ||||
fm.end() | fm.end() | ||||
@command('perfvolatilesets', | @command(b'perfvolatilesets', | ||||
[('', 'clear-obsstore', False, 'drop obsstore between each call.'), | [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'), | ||||
] + formatteropts) | ] + formatteropts) | ||||
def perfvolatilesets(ui, repo, *names, **opts): | def perfvolatilesets(ui, repo, *names, **opts): | ||||
"""benchmark the computation of various volatile set | """benchmark the computation of various volatile set | ||||
Volatile set computes element related to filtering and obsolescence.""" | Volatile set computes element related to filtering and obsolescence.""" | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
repo = repo.unfiltered() | repo = repo.unfiltered() | ||||
def getobs(name): | def getobs(name): | ||||
def d(): | def d(): | ||||
repo.invalidatevolatilesets() | repo.invalidatevolatilesets() | ||||
if opts['clear_obsstore']: | if opts[b'clear_obsstore']: | ||||
clearfilecache(repo, 'obsstore') | clearfilecache(repo, b'obsstore') | ||||
obsolete.getrevs(repo, name) | obsolete.getrevs(repo, name) | ||||
return d | return d | ||||
allobs = sorted(obsolete.cachefuncs) | allobs = sorted(obsolete.cachefuncs) | ||||
if names: | if names: | ||||
allobs = [n for n in allobs if n in names] | allobs = [n for n in allobs if n in names] | ||||
for name in allobs: | for name in allobs: | ||||
timer(getobs(name), title=name) | timer(getobs(name), title=name) | ||||
def getfiltered(name): | def getfiltered(name): | ||||
def d(): | def d(): | ||||
repo.invalidatevolatilesets() | repo.invalidatevolatilesets() | ||||
if opts['clear_obsstore']: | if opts[b'clear_obsstore']: | ||||
clearfilecache(repo, 'obsstore') | clearfilecache(repo, b'obsstore') | ||||
repoview.filterrevs(repo, name) | repoview.filterrevs(repo, name) | ||||
return d | return d | ||||
allfilter = sorted(repoview.filtertable) | allfilter = sorted(repoview.filtertable) | ||||
if names: | if names: | ||||
allfilter = [n for n in allfilter if n in names] | allfilter = [n for n in allfilter if n in names] | ||||
for name in allfilter: | for name in allfilter: | ||||
timer(getfiltered(name), title=name) | timer(getfiltered(name), title=name) | ||||
fm.end() | fm.end() | ||||
@command('perfbranchmap', | @command(b'perfbranchmap', | ||||
[('f', 'full', False, | [(b'f', b'full', False, | ||||
'Includes build time of subset'), | b'Includes build time of subset'), | ||||
('', 'clear-revbranch', False, | (b'', b'clear-revbranch', False, | ||||
'purge the revbranch cache between computation'), | b'purge the revbranch cache between computation'), | ||||
] + formatteropts) | ] + formatteropts) | ||||
def perfbranchmap(ui, repo, *filternames, **opts): | def perfbranchmap(ui, repo, *filternames, **opts): | ||||
"""benchmark the update of a branchmap | """benchmark the update of a branchmap | ||||
This benchmarks the full repo.branchmap() call with read and write disabled | This benchmarks the full repo.branchmap() call with read and write disabled | ||||
""" | """ | ||||
full = opts.get("full", False) | full = opts.get(b"full", False) | ||||
clear_revbranch = opts.get("clear_revbranch", False) | clear_revbranch = opts.get(b"clear_revbranch", False) | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
def getbranchmap(filtername): | def getbranchmap(filtername): | ||||
"""generate a benchmark function for the filtername""" | """generate a benchmark function for the filtername""" | ||||
if filtername is None: | if filtername is None: | ||||
view = repo | view = repo | ||||
else: | else: | ||||
view = repo.filtered(filtername) | view = repo.filtered(filtername) | ||||
def d(): | def d(): | ||||
subsettable = getbranchmapsubsettable() | subsettable = getbranchmapsubsettable() | ||||
allfilters = [] | allfilters = [] | ||||
while possiblefilters: | while possiblefilters: | ||||
for name in possiblefilters: | for name in possiblefilters: | ||||
subset = subsettable.get(name) | subset = subsettable.get(name) | ||||
if subset not in possiblefilters: | if subset not in possiblefilters: | ||||
break | break | ||||
else: | else: | ||||
assert False, 'subset cycle %s!' % possiblefilters | assert False, b'subset cycle %s!' % possiblefilters | ||||
allfilters.append(name) | allfilters.append(name) | ||||
possiblefilters.remove(name) | possiblefilters.remove(name) | ||||
# warm the cache | # warm the cache | ||||
if not full: | if not full: | ||||
for name in allfilters: | for name in allfilters: | ||||
repo.filtered(name).branchmap() | repo.filtered(name).branchmap() | ||||
if not filternames or 'unfiltered' in filternames: | if not filternames or b'unfiltered' in filternames: | ||||
# add unfiltered | # add unfiltered | ||||
allfilters.append(None) | allfilters.append(None) | ||||
branchcacheread = safeattrsetter(branchmap, 'read') | branchcacheread = safeattrsetter(branchmap, b'read') | ||||
branchcachewrite = safeattrsetter(branchmap.branchcache, 'write') | branchcachewrite = safeattrsetter(branchmap.branchcache, b'write') | ||||
branchcacheread.set(lambda repo: None) | branchcacheread.set(lambda repo: None) | ||||
branchcachewrite.set(lambda bc, repo: None) | branchcachewrite.set(lambda bc, repo: None) | ||||
try: | try: | ||||
for name in allfilters: | for name in allfilters: | ||||
printname = name | printname = name | ||||
if name is None: | if name is None: | ||||
printname = 'unfiltered' | printname = b'unfiltered' | ||||
timer(getbranchmap(name), title=str(printname)) | timer(getbranchmap(name), title=str(printname)) | ||||
finally: | finally: | ||||
branchcacheread.restore() | branchcacheread.restore() | ||||
branchcachewrite.restore() | branchcachewrite.restore() | ||||
fm.end() | fm.end() | ||||
@command('perfbranchmapload', [ | @command(b'perfbranchmapload', [ | ||||
('f', 'filter', '', 'Specify repoview filter'), | (b'f', b'filter', b'', b'Specify repoview filter'), | ||||
('', 'list', False, 'List brachmap filter caches'), | (b'', b'list', False, b'List brachmap filter caches'), | ||||
] + formatteropts) | ] + formatteropts) | ||||
def perfbranchmapread(ui, repo, filter='', list=False, **opts): | def perfbranchmapread(ui, repo, filter=b'', list=False, **opts): | ||||
"""benchmark reading the branchmap""" | """benchmark reading the branchmap""" | ||||
if list: | if list: | ||||
for name, kind, st in repo.cachevfs.readdir(stat=True): | for name, kind, st in repo.cachevfs.readdir(stat=True): | ||||
if name.startswith('branch2'): | if name.startswith(b'branch2'): | ||||
filtername = name.partition('-')[2] or 'unfiltered' | filtername = name.partition(b'-')[2] or b'unfiltered' | ||||
ui.status('%s - %s\n' | ui.status(b'%s - %s\n' | ||||
% (filtername, util.bytecount(st.st_size))) | % (filtername, util.bytecount(st.st_size))) | ||||
return | return | ||||
if filter: | if filter: | ||||
repo = repoview.repoview(repo, filter) | repo = repoview.repoview(repo, filter) | ||||
else: | else: | ||||
repo = repo.unfiltered() | repo = repo.unfiltered() | ||||
# try once without timer, the filter may not be cached | # try once without timer, the filter may not be cached | ||||
if branchmap.read(repo) is None: | if branchmap.read(repo) is None: | ||||
raise error.Abort('No brachmap cached for %s repo' | raise error.Abort(b'No brachmap cached for %s repo' | ||||
% (filter or 'unfiltered')) | % (filter or b'unfiltered')) | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(lambda: branchmap.read(repo) and None) | timer(lambda: branchmap.read(repo) and None) | ||||
fm.end() | fm.end() | ||||
@command('perfloadmarkers') | @command(b'perfloadmarkers') | ||||
def perfloadmarkers(ui, repo): | def perfloadmarkers(ui, repo): | ||||
"""benchmark the time to parse the on-disk markers for a repo | """benchmark the time to parse the on-disk markers for a repo | ||||
Result is the number of markers in the repo.""" | Result is the number of markers in the repo.""" | ||||
timer, fm = gettimer(ui) | timer, fm = gettimer(ui) | ||||
svfs = getsvfs(repo) | svfs = getsvfs(repo) | ||||
timer(lambda: len(obsolete.obsstore(svfs))) | timer(lambda: len(obsolete.obsstore(svfs))) | ||||
fm.end() | fm.end() | ||||
# NOTE(review): this span is a two-column diff rendering (old | new | ||||)
# of the lrucachedict benchmark.  The diff view elides unchanged hunks, so
# the setup of `values`, and the dogets/dosets/domixed helper definitions
# between `values = []` and the `try:` body, are not visible here; the code
# is therefore left byte-identical rather than reconstructed.
@command('perflrucachedict', formatteropts + | @command(b'perflrucachedict', formatteropts + | ||||
[('', 'size', 4, 'size of cache'), | [(b'', b'size', 4, b'size of cache'), | ||||
('', 'gets', 10000, 'number of key lookups'), | (b'', b'gets', 10000, b'number of key lookups'), | ||||
('', 'sets', 10000, 'number of key sets'), | (b'', b'sets', 10000, b'number of key sets'), | ||||
('', 'mixed', 10000, 'number of mixed mode operations'), | (b'', b'mixed', 10000, b'number of mixed mode operations'), | ||||
('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')], | (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')], | ||||
norepo=True) | norepo=True) | ||||
def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000, | def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000, | ||||
mixedgetfreq=50, **opts): | mixedgetfreq=50, **opts): | ||||
# doinit times raw cache construction, repeated to get a measurable span.
def doinit(): | def doinit(): | ||||
for i in xrange(10000): | for i in xrange(10000): | ||||
util.lrucachedict(size) | util.lrucachedict(size) | ||||
values = [] | values = [] | ||||
# presumably part of the mixed get/set helper: look the key up and, on a
# miss, insert it -- TODO confirm against the elided helper definitions.
try: | try: | ||||
d[v] | d[v] | ||||
except KeyError: | except KeyError: | ||||
pass | pass | ||||
else: | else: | ||||
d[v] = v | d[v] = v | ||||
# each (helper, title) pair below is timed independently with its own
# formatter/timer instance.
benches = [ | benches = [ | ||||
(doinit, 'init'), | (doinit, b'init'), | ||||
(dogets, 'gets'), | (dogets, b'gets'), | ||||
(dosets, 'sets'), | (dosets, b'sets'), | ||||
(domixed, 'mixed') | (domixed, b'mixed') | ||||
] | ] | ||||
for fn, title in benches: | for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | timer(fn, title=title) | ||||
fm.end() | fm.end() | ||||
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    timer, fm = gettimer(ui, opts)
    def bench():
        # 100000 identical small writes, to isolate per-call overhead
        remaining = 100000
        while remaining > 0:
            ui.write(b'Testing write performance\n')
            remaining -= 1
    timer(bench)
    fm.end()
def uisetup(ui):
    """Adapt this extension to the Mercurial version it is loaded into.

    Attribute names handed to getattr()/setattr() based helpers must be
    native str on Python 3: safehasattr() is defined (see file header) as
    ``getattr(thing, attr, _undefined)``, and extensions.wrapfunction()
    likewise resolves its target by attribute name, so passing b'...'
    names would raise TypeError there.  Command-line opts keys and
    user-facing messages remain bytes, matching the rest of the file.
    """
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # opts is keyed by bytes; the repo attribute probe needs sysstr
            if opts.get(b'dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)