These scripts weren't blackened. I found them while adding script
checking to test-check-format.t.
- skip-blame black
Reviewers: pulkit, hg-reviewers
Automatic diff as part of commit; lint not applicable.
Automatic diff as part of commit; unit tests not applicable.
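
The check that surfaced these files runs black in check-only mode over Mercurial's scripts. The snippet below is a minimal, hypothetical sketch of that kind of gate, not the actual test-check-format.t (whose exact flags and configuration may differ); it assumes `black` is on PATH and is run from the repository root.

```python
# Hypothetical sketch of a black formatting gate over the scripts touched
# here; the real test-check-format.t may use different flags or a config file.
import subprocess
import sys

SCRIPTS = [
    'doc/docchecker',
    'doc/runrst',
    'hgweb.cgi',
    'i18n/hggettext',
    'i18n/posplit',
    'tests/f',
    'tests/hghave',
]


def check_formatting(paths):
    # --check exits non-zero instead of rewriting files, --diff prints what
    # would change, and -S keeps existing string quoting (the blackened code
    # below still uses single quotes).
    return subprocess.call(['black', '--check', '--diff', '-S'] + list(paths))


if __name__ == '__main__':
    sys.exit(check_formatting(SCRIPTS))
```
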
| | Path | Packages |
|---|---|---|
| M | doc/docchecker (24 lines) | |
| M | doc/runrst (18 lines) | |
| M | hgweb.cgi (9 lines) | |
| M | i18n/hggettext (26 lines) | |
| M | i18n/posplit (13 lines) | |
| M | tests/f (131 lines) | |
| M | tests/hghave (21 lines) | |

| Status | Author | Revision |
|---|---|---|
| Closed | indygreg | |
| Closed | indygreg | |
| Closed | indygreg | D7446 black: blacken scripts |
| Closed | indygreg | |

doc/docchecker:

```diff
 from __future__ import absolute_import, print_function

 import os
 import re
 import sys

 try:
     import msvcrt
     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
 except ImportError:
     pass

 stdout = getattr(sys.stdout, 'buffer', sys.stdout)

 leadingline = re.compile(br'(^\s*)(\S.*)$')

 checks = [
-    (br""":hg:`[^`]*'[^`]*`""",
-     b"""warning: please avoid nesting ' in :hg:`...`"""),
-    (br'\w:hg:`',
-     b'warning: please have a space before :hg:'),
-    (br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
-     b'''warning: please use " instead of ' for hg ... "..."'''),
+    (
+        br""":hg:`[^`]*'[^`]*`""",
+        b"""warning: please avoid nesting ' in :hg:`...`""",
+    ),
+    (br'\w:hg:`', b'warning: please have a space before :hg:'),
+    (
+        br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
+        b'''warning: please use " instead of ' for hg ... "..."''',
+    ),
 ]

 def check(line):
     messages = []
     for match, msg in checks:
         if re.search(match, line):
             messages.append(msg)
     if messages:
         stdout.write(b'%s\n' % line)
         for msg in messages:
             stdout.write(b'%s\n' % msg)

 def work(file):
     (llead, lline) = (b'', b'')
     for line in file:
         # this section unwraps lines
         match = leadingline.match(line)
         if not match:
             check(lline)
             (llead, lline) = (b'', b'')
             continue

         lead, line = match.group(1), match.group(2)
-        if (lead == llead):
-            if (lline != b''):
+        if lead == llead:
+            if lline != b'':
                 lline += b' ' + line
             else:
                 lline = line
         else:
             check(lline)
             (llead, lline) = (lead, line)
     check(lline)

 def main():
     for f in sys.argv[1:]:
         try:
             with open(f, 'rb') as file:
                 work(file)
         except BaseException as e:
             sys.stdout.write(r"failed to process %s: %s\n" % (f, e))

 main()
```

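For reference, the first of docchecker's patterns above flags a single quote nested inside an :hg:`...` role. A tiny, hypothetical illustration (the input line is made up and is not part of this change):

```python
# Hypothetical illustration of docchecker's first check: a ' nested inside
# :hg:`...` triggers the "please avoid nesting" warning.
import re

pattern = br""":hg:`[^`]*'[^`]*`"""
line = b"Use :hg:`commit -m 'message'` to record the change."
assert re.search(pattern, line) is not None
```
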
"""usage: %s WRITER args... | """usage: %s WRITER args... | ||||
where WRITER is the name of a Docutils writer such as 'html' or 'manpage' | where WRITER is the name of a Docutils writer such as 'html' or 'manpage' | ||||
""" | """ | ||||
from __future__ import absolute_import | from __future__ import absolute_import | ||||
import sys | import sys | ||||
try: | try: | ||||
import docutils.core as core | import docutils.core as core | ||||
import docutils.nodes as nodes | import docutils.nodes as nodes | ||||
import docutils.utils as utils | import docutils.utils as utils | ||||
import docutils.parsers.rst.roles as roles | import docutils.parsers.rst.roles as roles | ||||
except ImportError: | except ImportError: | ||||
sys.stderr.write("abort: couldn't generate documentation: docutils " | sys.stderr.write( | ||||
"module is missing\n") | "abort: couldn't generate documentation: docutils " | ||||
sys.stderr.write("please install python-docutils or see " | "module is missing\n" | ||||
"http://docutils.sourceforge.net/\n") | ) | ||||
sys.stderr.write( | |||||
"please install python-docutils or see " | |||||
"http://docutils.sourceforge.net/\n" | |||||
) | |||||
sys.exit(-1) | sys.exit(-1) | ||||
def role_hg(name, rawtext, text, lineno, inliner, options=None, content=None): | def role_hg(name, rawtext, text, lineno, inliner, options=None, content=None): | ||||
text = "hg " + utils.unescape(text) | text = "hg " + utils.unescape(text) | ||||
linktext = nodes.literal(rawtext, text) | linktext = nodes.literal(rawtext, text) | ||||
parts = text.split() | parts = text.split() | ||||
cmd, args = parts[1], parts[2:] | cmd, args = parts[1], parts[2:] | ||||
refuri = "hg.1.html#%s" % cmd | refuri = "hg.1.html#%s" % cmd | ||||
if cmd == 'help' and args: | if cmd == 'help' and args: | ||||
if args[0] == 'config': | if args[0] == 'config': | ||||
# :hg:`help config` | # :hg:`help config` | ||||
refuri = "hgrc.5.html" | refuri = "hgrc.5.html" | ||||
elif args[0].startswith('config.'): | elif args[0].startswith('config.'): | ||||
# :hg:`help config.SECTION...` | # :hg:`help config.SECTION...` | ||||
refuri = "hgrc.5.html#%s" % args[0].split('.', 2)[1] | refuri = "hgrc.5.html#%s" % args[0].split('.', 2)[1] | ||||
elif len(args) >= 2 and args[0] == '-c': | elif len(args) >= 2 and args[0] == '-c': | ||||
# :hg:`help -c COMMAND ...` is equivalent to :hg:`COMMAND` | # :hg:`help -c COMMAND ...` is equivalent to :hg:`COMMAND` | ||||
# (mainly for :hg:`help -c config`) | # (mainly for :hg:`help -c config`) | ||||
refuri = "hg.1.html#%s" % args[1] | refuri = "hg.1.html#%s" % args[1] | ||||
else: | else: | ||||
refuri = "hg.1.html#%s" % args[0] | refuri = "hg.1.html#%s" % args[0] | ||||
node = nodes.reference(rawtext, '', linktext, | node = nodes.reference(rawtext, '', linktext, refuri=refuri) | ||||
refuri=refuri) | |||||
return [node], [] | return [node], [] | ||||
roles.register_local_role("hg", role_hg) | roles.register_local_role("hg", role_hg) | ||||
if __name__ == "__main__": | if __name__ == "__main__": | ||||
if len(sys.argv) < 2: | if len(sys.argv) < 2: | ||||
sys.stderr.write(__doc__ % sys.argv[0]) | sys.stderr.write(__doc__ % sys.argv[0]) | ||||
sys.exit(1) | sys.exit(1) | ||||
writer = sys.argv[1] | writer = sys.argv[1] | ||||
del sys.argv[1] | del sys.argv[1] | ||||
core.publish_cmdline(writer_name=writer) | core.publish_cmdline(writer_name=writer) |
hgweb.cgi:

```diff
 #!/usr/bin/env python
 #
 # An example hgweb CGI script, edit as necessary
 # See also https://mercurial-scm.org/wiki/PublishingRepositories

 # Path to repo or hgweb config to serve (see 'hg help hgweb')
 config = "/path/to/repo/or/config"

 # Uncomment and adjust if Mercurial is not installed system-wide
 # (consult "installed modules" path from 'hg debuginstall'):
-#import sys; sys.path.insert(0, "/path/to/python/lib")
+# import sys; sys.path.insert(0, "/path/to/python/lib")

 # Uncomment to send python tracebacks to the browser if an error occurs:
-#import cgitb; cgitb.enable()
+# import cgitb; cgitb.enable()

-from mercurial import demandimport; demandimport.enable()
+from mercurial import demandimport
+demandimport.enable()
 from mercurial.hgweb import hgweb, wsgicgi
 application = hgweb(config)
 wsgicgi.launch(application)
```

i18n/hggettext:

```diff
             lines[-1] = lines[-1] + '\n'
     lines = map(escape, lines)
     lineterm = '\\n"\n"'
     s = '""\n"' + lineterm.join(lines) + '"'
     return s

 def poentry(path, lineno, s):
-    return ('#: %s:%d\n' % (path, lineno) +
-            'msgid %s\n' % normalize(s) +
-            'msgstr ""\n')
+    return (
+        '#: %s:%d\n' % (path, lineno)
+        + 'msgid %s\n' % normalize(s)
+        + 'msgstr ""\n'
+    )

 doctestre = re.compile(r'^ +>>> ', re.MULTILINE)

 def offset(src, doc, name, lineno, default):
     """Compute offset or issue a warning on stdout."""
     # remove doctest part, in order to avoid backslash mismatching
     m = doctestre.search(doc)
     if m:
-        doc = doc[:m.start()]
+        doc = doc[: m.start()]
     # Backslashes in doc appear doubled in src.
     end = src.find(doc.replace('\\', '\\\\'))
     if end == -1:
         # This can happen if the docstring contains unnecessary escape
         # sequences such as \" in a triple-quoted string. The problem
         # is that \" is turned into " and so doc wont appear in src.
-        sys.stderr.write("%s:%d:warning:"
-                         " unknown docstr offset, assuming %d lines\n"
-                         % (name, lineno, default))
+        sys.stderr.write(
+            "%s:%d:warning:"
+            " unknown docstr offset, assuming %d lines\n"
+            % (name, lineno, default)
+        )
         return default
     else:
         return src.count('\n', 0, end)

 def importpath(path):
     """Import a path like foo/bar/baz.py and return the baz module."""
     if path.endswith('.py'):
[...]
     cmdtable = getattr(mod, 'cmdtable', {})
     if not cmdtable:
         # Maybe we are processing mercurial.commands?
         cmdtable = getattr(mod, 'table', {})
     functions.extend((c[0], False) for c in cmdtable.itervalues())

     for func, rstrip in functions:
         if func.__doc__:
-            docobj = func # this might be a proxy to provide formatted doc
+            docobj = func  # this might be a proxy to provide formatted doc
             func = getattr(func, '_origfunc', func)
             funcmod = inspect.getmodule(func)
             extra = ''
             if funcmod.__package__ == funcmod.__name__:
                 extra = '/__init__'
             actualpath = '%s%s.py' % (funcmod.__name__.replace('.', '/'), extra)
             src = inspect.getsource(func)
[...]
 if __name__ == "__main__":
     # It is very important that we import the Mercurial modules from
     # the source tree where hggettext is executed. Otherwise we might
     # accidentally import and extract strings from a Mercurial
     # installation mentioned in PYTHONPATH.
     sys.path.insert(0, os.getcwd())
-    from mercurial import demandimport; demandimport.enable()
+    from mercurial import demandimport
+    demandimport.enable()
     for path in sys.argv[1:]:
         if path.endswith('.txt'):
             rawtext(path)
         else:
             docstrings(path)
```

i18n/posplit:

```diff
 #!/usr/bin/env python
 #
 # posplit - split messages in paragraphs on .po/.pot files
 #
 # license: MIT/X11/Expat

 from __future__ import absolute_import, print_function

 import polib
 import re
 import sys

 def addentry(po, entry, cache):
     e = cache.get(entry.msgid)
     if e:
         e.occurrences.extend(entry.occurrences)
         # merge comments from entry
         for comment in entry.comment.split('\n'):
             if comment and comment not in e.comment:
                 if not e.comment:
                     e.comment = comment
                 else:
                     e.comment += '\n' + comment
     else:
         po.append(entry)
         cache[entry.msgid] = entry

 def mkentry(orig, delta, msgid, msgstr):
     entry = polib.POEntry()
     entry.merge(orig)
     entry.msgid = msgid or orig.msgid
     entry.msgstr = msgstr or orig.msgstr
     entry.occurrences = [(p, int(l) + delta) for (p, l) in orig.occurrences]
     return entry

 if __name__ == "__main__":
     po = polib.pofile(sys.argv[1])
     cache = {}
     entries = po[:]
     po[:] = []
-    findd = re.compile(r' *\.\. (\w+)::') # for finding directives
+    findd = re.compile(r' *\.\. (\w+)::')  # for finding directives
     for entry in entries:
         msgids = entry.msgid.split(u'\n\n')
         if entry.msgstr:
             msgstrs = entry.msgstr.split(u'\n\n')
         else:
             msgstrs = [u''] * len(msgids)
         if len(msgids) != len(msgstrs):
             # places the whole existing translation as a fuzzy
             # translation for each paragraph, to give the
             # translator a chance to recover part of the old
             # translation - erasing extra paragraphs is
             # probably better than retranslating all from start
             if 'fuzzy' not in entry.flags:
                 entry.flags.append('fuzzy')
             msgstrs = [entry.msgstr] * len(msgids)
         delta = 0
         for msgid, msgstr in zip(msgids, msgstrs):
             if msgid and msgid != '::':
                 newentry = mkentry(entry, delta, msgid, msgstr)
                 mdirective = findd.match(msgid)
                 if mdirective:
-                    if not msgid[mdirective.end():].rstrip():
+                    if not msgid[mdirective.end() :].rstrip():
                         # only directive, nothing to translate here
                         delta += 2
                         continue
                     directive = mdirective.group(1)
                     if directive in ('container', 'include'):
                         if msgid.rstrip('\n').count('\n') == 0:
                             # only rst syntax, nothing to translate
                             delta += 2
                             continue
                         else:
                             # lines following directly, unexpected
-                            print('Warning: text follows line with directive'
-                                  ' %s' % directive)
+                            print(
+                                'Warning: text follows line with directive'
+                                ' %s' % directive
+                            )
                     comment = 'do not translate: .. %s::' % directive
                     if not newentry.comment:
                         newentry.comment = comment
                     elif comment not in newentry.comment:
                         newentry.comment += '\n' + comment
                 addentry(po, newentry, cache)
             delta += 2 + msgid.count('\n')
     po.save()
```

tests/f:

```diff
 import glob
 import hashlib
 import optparse
 import os
 import re
 import sys

 # Python 3 adapters
-ispy3 = (sys.version_info[0] >= 3)
+ispy3 = sys.version_info[0] >= 3
 if ispy3:
     def iterbytes(s):
         for i in range(len(s)):
-            yield s[i:i + 1]
+            yield s[i : i + 1]
 else:
     iterbytes = iter

 def visit(opts, filenames, outfile):
     """Process filenames in the way specified in opts, writing output to
     outfile."""
     for f in sorted(filenames):
         isstdin = f == '-'
         if not isstdin and not os.path.lexists(f):
             outfile.write(b'%s: file not found\n' % f.encode('utf-8'))
             continue
[...]
             facts.append(b'size=%d' % stat.st_size)
         if opts.mode and not islink:
             facts.append(b'mode=%o' % (stat.st_mode & 0o777))
         if opts.links:
             facts.append(b'links=%d' % stat.st_nlink)
         if opts.newer:
             # mtime might be in whole seconds so newer file might be same
             if stat.st_mtime >= os.stat(opts.newer).st_mtime:
-                facts.append(b'newer than %s' % opts.newer.encode(
-                    'utf8', 'replace'))
+                facts.append(
+                    b'newer than %s' % opts.newer.encode('utf8', 'replace')
+                )
             else:
-                facts.append(b'older than %s' % opts.newer.encode(
-                    'utf8', 'replace'))
+                facts.append(
+                    b'older than %s' % opts.newer.encode('utf8', 'replace')
+                )
         if opts.md5 and content is not None:
             h = hashlib.md5(content)
-            facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(b'md5=%s' % binascii.hexlify(h.digest())[: opts.bytes])
         if opts.sha1 and content is not None:
             h = hashlib.sha1(content)
-            facts.append(b'sha1=%s' % binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(
+                b'sha1=%s' % binascii.hexlify(h.digest())[: opts.bytes]
+            )
         if opts.sha256 and content is not None:
             h = hashlib.sha256(content)
-            facts.append(b'sha256=%s' %
-                         binascii.hexlify(h.digest())[:opts.bytes])
+            facts.append(
+                b'sha256=%s' % binascii.hexlify(h.digest())[: opts.bytes]
+            )
         if isstdin:
             outfile.write(b', '.join(facts) + b'\n')
         elif facts:
             outfile.write(b'%s: %s\n' % (f.encode('utf-8'), b', '.join(facts)))
         elif not quiet:
             outfile.write(b'%s:\n' % f.encode('utf-8'))
         if content is not None:
             chunk = content
             if not islink:
                 if opts.lines:
                     if opts.lines >= 0:
-                        chunk = b''.join(chunk.splitlines(True)[:opts.lines])
+                        chunk = b''.join(chunk.splitlines(True)[: opts.lines])
                     else:
-                        chunk = b''.join(chunk.splitlines(True)[opts.lines:])
+                        chunk = b''.join(chunk.splitlines(True)[opts.lines :])
                 if opts.bytes:
                     if opts.bytes >= 0:
-                        chunk = chunk[:opts.bytes]
+                        chunk = chunk[: opts.bytes]
                     else:
-                        chunk = chunk[opts.bytes:]
+                        chunk = chunk[opts.bytes :]
             if opts.hexdump:
                 for i in range(0, len(chunk), 16):
-                    s = chunk[i:i + 16]
-                    outfile.write(b'%04x: %-47s |%s|\n' %
-                                  (i, b' '.join(
-                                      b'%02x' % ord(c) for c in iterbytes(s)),
-                                   re.sub(b'[^ -~]', b'.', s)))
+                    s = chunk[i : i + 16]
+                    outfile.write(
+                        b'%04x: %-47s |%s|\n'
+                        % (
+                            i,
+                            b' '.join(b'%02x' % ord(c) for c in iterbytes(s)),
+                            re.sub(b'[^ -~]', b'.', s),
+                        )
+                    )
             if opts.dump:
                 if not quiet:
                     outfile.write(b'>>>\n')
                 outfile.write(chunk)
                 if not quiet:
                     if chunk.endswith(b'\n'):
                         outfile.write(b'<<<\n')
                     else:
                         outfile.write(b'\n<<< no trailing newline\n')
         if opts.recurse and dirfiles:
             assert not isstdin
             visit(opts, dirfiles, outfile)

 if __name__ == "__main__":
     parser = optparse.OptionParser("%prog [options] [filenames]")
-    parser.add_option("-t", "--type", action="store_true",
-                      help="show file type (file or directory)")
-    parser.add_option("-m", "--mode", action="store_true",
-                      help="show file mode")
-    parser.add_option("-l", "--links", action="store_true",
-                      help="show number of links")
-    parser.add_option("-s", "--size", action="store_true",
-                      help="show size of file")
-    parser.add_option("-n", "--newer", action="store",
-                      help="check if file is newer (or same)")
-    parser.add_option("-r", "--recurse", action="store_true",
-                      help="recurse into directories")
-    parser.add_option("-S", "--sha1", action="store_true",
-                      help="show sha1 hash of the content")
-    parser.add_option("", "--sha256", action="store_true",
-                      help="show sha256 hash of the content")
-    parser.add_option("-M", "--md5", action="store_true",
-                      help="show md5 hash of the content")
-    parser.add_option("-D", "--dump", action="store_true",
-                      help="dump file content")
-    parser.add_option("-H", "--hexdump", action="store_true",
-                      help="hexdump file content")
-    parser.add_option("-B", "--bytes", type="int",
-                      help="number of characters to dump")
-    parser.add_option("-L", "--lines", type="int",
-                      help="number of lines to dump")
-    parser.add_option("-q", "--quiet", action="store_true",
-                      help="no default output")
+    parser.add_option(
+        "-t",
+        "--type",
+        action="store_true",
+        help="show file type (file or directory)",
+    )
+    parser.add_option(
+        "-m", "--mode", action="store_true", help="show file mode"
+    )
+    parser.add_option(
+        "-l", "--links", action="store_true", help="show number of links"
+    )
+    parser.add_option(
+        "-s", "--size", action="store_true", help="show size of file"
+    )
+    parser.add_option(
+        "-n", "--newer", action="store", help="check if file is newer (or same)"
+    )
+    parser.add_option(
+        "-r", "--recurse", action="store_true", help="recurse into directories"
+    )
+    parser.add_option(
+        "-S",
+        "--sha1",
+        action="store_true",
+        help="show sha1 hash of the content",
+    )
+    parser.add_option(
+        "",
+        "--sha256",
+        action="store_true",
+        help="show sha256 hash of the content",
+    )
+    parser.add_option(
+        "-M", "--md5", action="store_true", help="show md5 hash of the content"
+    )
+    parser.add_option(
+        "-D", "--dump", action="store_true", help="dump file content"
+    )
+    parser.add_option(
+        "-H", "--hexdump", action="store_true", help="hexdump file content"
+    )
+    parser.add_option(
+        "-B", "--bytes", type="int", help="number of characters to dump"
+    )
+    parser.add_option(
+        "-L", "--lines", type="int", help="number of lines to dump"
+    )
+    parser.add_option(
+        "-q", "--quiet", action="store_true", help="no default output"
+    )
     (opts, filenames) = parser.parse_args(sys.argv[1:])
     if not filenames:
         filenames = ['-']
     visit(opts, filenames, getattr(sys.stdout, 'buffer', sys.stdout))
```

tests/hghave:

```diff
 #!/usr/bin/env python
 """Test the running system for features availability. Exit with zero
 if all features are there, non-zero otherwise. If a feature name is
 prefixed with "no-", the absence of feature is tested.
 """

 from __future__ import absolute_import, print_function

 import hghave
 import optparse
 import os
 import sys

 checks = hghave.checks

 def list_features():
     for name, feature in sorted(checks.items()):
         desc = feature[1]
         print(name + ':', desc)

 def test_features():
     failed = 0
     for name, feature in checks.items():
         check, _ = feature
         try:
             check()
         except Exception as e:
             print("feature %s failed: %s" % (name, e))
             failed += 1
     return failed

 parser = optparse.OptionParser("%prog [options] [features]")
-parser.add_option("--test-features", action="store_true",
-                  help="test available features")
-parser.add_option("--list-features", action="store_true",
-                  help="list available features")
+parser.add_option(
+    "--test-features", action="store_true", help="test available features"
+)
+parser.add_option(
+    "--list-features", action="store_true", help="list available features"
+)

 def _loadaddon():
     if 'TESTDIR' in os.environ:
         # loading from '.' isn't needed, because `hghave` should be
         # running at TESTTMP in this case
         path = os.environ['TESTDIR']
     else:
         path = '.'
     if not os.path.exists(os.path.join(path, 'hghaveaddon.py')):
         return
     sys.path.insert(0, path)
     try:
         import hghaveaddon
-        assert hghaveaddon # silence pyflakes
+        assert hghaveaddon  # silence pyflakes
     except BaseException as inst:
-        sys.stderr.write('failed to import hghaveaddon.py from %r: %s\n'
-                         % (path, inst))
+        sys.stderr.write(
+            'failed to import hghaveaddon.py from %r: %s\n' % (path, inst)
+        )
         sys.exit(2)
     sys.path.pop(0)

 if __name__ == '__main__':
     options, args = parser.parse_args()
     _loadaddon()
     if options.list_features:
         list_features()
         sys.exit(0)
     if options.test_features:
         sys.exit(test_features())
     hghave.require(args)
```

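
For readers unfamiliar with the hghaveaddon hook used by _loadaddon() above: a test directory can ship a hghaveaddon.py that registers extra feature checks. The sketch below is hypothetical and relies only on the structure visible in list_features()/test_features() (a feature name mapped to a check callable plus a description); a real addon may go through a registration helper in hghave rather than touching the dict directly.

```python
# Hypothetical hghaveaddon.py: tests/hghave imports this module (if present)
# so a test run can advertise extra features. `mytool` is a made-up name.
from __future__ import absolute_import

import distutils.spawn

import hghave


def has_mytool():
    # the feature is present when the (hypothetical) `mytool` binary is on PATH
    return distutils.spawn.find_executable('mytool') is not None


# hghave.checks maps feature name -> (check callable, description)
hghave.checks['mytool'] = (has_mytool, 'mytool command line tool')
```

A .t test could then gate itself on the new feature with a `#require mytool` line.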