This required a couple of small tweaks to un-confuse black, but now it
works. Big formatting changes come from:
- Dramatically improved collection-splitting logic upstream
- Black having a strong (correct IMO) opinion that """ is better than '''
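For a concrete sense of what those two changes look like, here is a minimal before/after sketch (an ad-hoc illustration using hypothetical find/query helpers, not code from this diff). With 20.8b1, a collection literal that ends in a trailing comma (the "magic trailing comma") is always exploded onto one line per element, and '''-quoted docstrings are rewritten to use """, even though Mercurial otherwise runs black with string normalization disabled:

# Before (the older black left these alone):
def find(name):
    '''Look up an entry by name.

    Returns the first match.
    '''
    return query({'Name': 'name', 'Values': [name],})

# After black 20.8b1:
def find(name):
    """Look up an entry by name.

    Returns the first match.
    """
    return query(
        {
            'Name': 'name',
            'Values': [name],
        }
    )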
Amended this into setup.py, which was changed in D9427 with the older black:
@@ -816,7 +816,8 @@ class buildhgexe(build_ext):
         if sys.version_info[0] >= 3:
             fsdecode = os.fsdecode
             dest = os.path.join(
-                os.path.dirname(self.hgtarget), fsdecode(dllbasename),
+                os.path.dirname(self.hgtarget),
+                fsdecode(dllbasename),
             )
             if not os.path.exists(dest):
"Action": "sts:AssumeRole" | "Action": "sts:AssumeRole" | ||||
} | } | ||||
] | ] | ||||
} | } | ||||
'''.strip() | '''.strip() | ||||
IAM_INSTANCE_PROFILES = { | IAM_INSTANCE_PROFILES = { | ||||
'ephemeral-ec2-1': {'roles': ['ephemeral-ec2-role-1',],} | 'ephemeral-ec2-1': { | ||||
'roles': [ | |||||
'ephemeral-ec2-role-1', | |||||
], | |||||
} | |||||
} | } | ||||
# User Data for Windows EC2 instance. Mainly used to set the password | # User Data for Windows EC2 instance. Mainly used to set the password | ||||
# and configure WinRM. | # and configure WinRM. | ||||
# Inspired by the User Data script used by Packer | # Inspired by the User Data script used by Packer | ||||
# (from https://www.packer.io/intro/getting-started/build-image.html). | # (from https://www.packer.io/intro/getting-started/build-image.html). | ||||
WINDOWS_USER_DATA = r''' | WINDOWS_USER_DATA = r''' | ||||
profile.add_role(RoleName=role) | profile.add_role(RoleName=role) | ||||
def find_image(ec2resource, owner_id, name, reverse_sort_field=None): | def find_image(ec2resource, owner_id, name, reverse_sort_field=None): | ||||
"""Find an AMI by its owner ID and name.""" | """Find an AMI by its owner ID and name.""" | ||||
images = ec2resource.images.filter( | images = ec2resource.images.filter( | ||||
Filters=[ | Filters=[ | ||||
{'Name': 'owner-id', 'Values': [owner_id],}, | { | ||||
{'Name': 'state', 'Values': ['available'],}, | 'Name': 'owner-id', | ||||
{'Name': 'image-type', 'Values': ['machine'],}, | 'Values': [owner_id], | ||||
{'Name': 'name', 'Values': [name],}, | }, | ||||
{ | |||||
'Name': 'state', | |||||
'Values': ['available'], | |||||
}, | |||||
{ | |||||
'Name': 'image-type', | |||||
'Values': ['machine'], | |||||
}, | |||||
{ | |||||
'Name': 'name', | |||||
'Values': [name], | |||||
}, | |||||
] | ] | ||||
) | ) | ||||
if reverse_sort_field: | if reverse_sort_field: | ||||
images = sorted( | images = sorted( | ||||
images, | images, | ||||
key=lambda image: getattr(image, reverse_sort_field), | key=lambda image: getattr(image, reverse_sort_field), | ||||
reverse=True, | reverse=True, | ||||
if name in existing: | if name in existing: | ||||
security_groups[name] = existing[name] | security_groups[name] = existing[name] | ||||
continue | continue | ||||
actual = '%s%s' % (prefix, name) | actual = '%s%s' % (prefix, name) | ||||
print('adding security group %s' % actual) | print('adding security group %s' % actual) | ||||
group_res = ec2resource.create_security_group( | group_res = ec2resource.create_security_group( | ||||
Description=group['description'], GroupName=actual, | Description=group['description'], | ||||
GroupName=actual, | |||||
) | ) | ||||
group_res.authorize_ingress(IpPermissions=group['ingress'],) | group_res.authorize_ingress( | ||||
IpPermissions=group['ingress'], | |||||
) | |||||
security_groups[name] = group_res | security_groups[name] = group_res | ||||
return security_groups | return security_groups | ||||
def terminate_ec2_instances(ec2resource, prefix='hg-'): | def terminate_ec2_instances(ec2resource, prefix='hg-'): | ||||
"""Terminate all EC2 instances managed by us.""" | """Terminate all EC2 instances managed by us.""" | ||||
snapshot.delete() | snapshot.delete() | ||||
def wait_for_ssm(ssmclient, instances): | def wait_for_ssm(ssmclient, instances): | ||||
"""Wait for SSM to come online for an iterable of instance IDs.""" | """Wait for SSM to come online for an iterable of instance IDs.""" | ||||
while True: | while True: | ||||
res = ssmclient.describe_instance_information( | res = ssmclient.describe_instance_information( | ||||
Filters=[ | Filters=[ | ||||
{'Key': 'InstanceIds', 'Values': [i.id for i in instances],}, | { | ||||
'Key': 'InstanceIds', | |||||
'Values': [i.id for i in instances], | |||||
}, | |||||
], | ], | ||||
) | ) | ||||
available = len(res['InstanceInformationList']) | available = len(res['InstanceInformationList']) | ||||
wanted = len(instances) | wanted = len(instances) | ||||
print('%d/%d instances available in SSM' % (available, wanted)) | print('%d/%d instances available in SSM' % (available, wanted)) | ||||
if available == wanted: | if available == wanted: | ||||
return | return | ||||
time.sleep(2) | time.sleep(2) | ||||
def run_ssm_command(ssmclient, instances, document_name, parameters): | def run_ssm_command(ssmclient, instances, document_name, parameters): | ||||
"""Run a PowerShell script on an EC2 instance.""" | """Run a PowerShell script on an EC2 instance.""" | ||||
res = ssmclient.send_command( | res = ssmclient.send_command( | ||||
InstanceIds=[i.id for i in instances], | InstanceIds=[i.id for i in instances], | ||||
DocumentName=document_name, | DocumentName=document_name, | ||||
Parameters=parameters, | Parameters=parameters, | ||||
CloudWatchOutputConfig={'CloudWatchOutputEnabled': True,}, | CloudWatchOutputConfig={ | ||||
'CloudWatchOutputEnabled': True, | |||||
}, | |||||
) | ) | ||||
command_id = res['Command']['CommandId'] | command_id = res['Command']['CommandId'] | ||||
for instance in instances: | for instance in instances: | ||||
while True: | while True: | ||||
try: | try: | ||||
res = ssmclient.get_command_invocation( | res = ssmclient.get_command_invocation( | ||||
CommandId=command_id, InstanceId=instance.id, | CommandId=command_id, | ||||
InstanceId=instance.id, | |||||
) | ) | ||||
except botocore.exceptions.ClientError as e: | except botocore.exceptions.ClientError as e: | ||||
if e.response['Error']['Code'] == 'InvocationDoesNotExist': | if e.response['Error']['Code'] == 'InvocationDoesNotExist': | ||||
print('could not find SSM command invocation; waiting') | print('could not find SSM command invocation; waiting') | ||||
time.sleep(1) | time.sleep(1) | ||||
continue | continue | ||||
else: | else: | ||||
raise | raise | ||||
): | ): | ||||
"""Create an AMI from a running instance. | """Create an AMI from a running instance. | ||||
Returns the ``ec2resource.Image`` representing the created AMI. | Returns the ``ec2resource.Image`` representing the created AMI. | ||||
""" | """ | ||||
instance.stop() | instance.stop() | ||||
ec2client.get_waiter('instance_stopped').wait( | ec2client.get_waiter('instance_stopped').wait( | ||||
InstanceIds=[instance.id], WaiterConfig={'Delay': 5,} | InstanceIds=[instance.id], | ||||
WaiterConfig={ | |||||
'Delay': 5, | |||||
}, | |||||
) | ) | ||||
print('%s is stopped' % instance.id) | print('%s is stopped' % instance.id) | ||||
image = instance.create_image(Name=name, Description=description,) | image = instance.create_image( | ||||
Name=name, | |||||
Description=description, | |||||
) | |||||
image.create_tags( | image.create_tags( | ||||
Tags=[{'Key': 'HGIMAGEFINGERPRINT', 'Value': fingerprint,},] | Tags=[ | ||||
{ | |||||
'Key': 'HGIMAGEFINGERPRINT', | |||||
'Value': fingerprint, | |||||
}, | |||||
] | |||||
) | ) | ||||
print('waiting for image %s' % image.id) | print('waiting for image %s' % image.id) | ||||
ec2client.get_waiter('image_available').wait(ImageIds=[image.id],) | ec2client.get_waiter('image_available').wait( | ||||
ImageIds=[image.id], | |||||
) | |||||
print('image %s available as %s' % (image.id, image.name)) | print('image %s available as %s' % (image.id, image.name)) | ||||
return image | return image | ||||
def ensure_linux_dev_ami(c: AWSConnection, distro='debian10', prefix='hg-'): | def ensure_linux_dev_ami(c: AWSConnection, distro='debian10', prefix='hg-'): | ||||
"""Ensures a Linux development AMI is available and up-to-date. | """Ensures a Linux development AMI is available and up-to-date. | ||||
image = find_image( | image = find_image( | ||||
ec2resource, | ec2resource, | ||||
DEBIAN_ACCOUNT_ID, | DEBIAN_ACCOUNT_ID, | ||||
'debian-stretch-hvm-x86_64-gp2-2019-09-08-17994', | 'debian-stretch-hvm-x86_64-gp2-2019-09-08-17994', | ||||
) | ) | ||||
ssh_username = 'admin' | ssh_username = 'admin' | ||||
elif distro == 'debian10': | elif distro == 'debian10': | ||||
image = find_image( | image = find_image( | ||||
ec2resource, DEBIAN_ACCOUNT_ID_2, 'debian-10-amd64-20190909-10', | ec2resource, | ||||
DEBIAN_ACCOUNT_ID_2, | |||||
'debian-10-amd64-20190909-10', | |||||
) | ) | ||||
ssh_username = 'admin' | ssh_username = 'admin' | ||||
elif distro == 'ubuntu18.04': | elif distro == 'ubuntu18.04': | ||||
image = find_image( | image = find_image( | ||||
ec2resource, | ec2resource, | ||||
UBUNTU_ACCOUNT_ID, | UBUNTU_ACCOUNT_ID, | ||||
'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190918', | 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190918', | ||||
) | ) | ||||
try: | try: | ||||
yield instances | yield instances | ||||
finally: | finally: | ||||
for instance in instances: | for instance in instances: | ||||
instance.ssh_client.close() | instance.ssh_client.close() | ||||
def ensure_windows_dev_ami( | def ensure_windows_dev_ami( | ||||
c: AWSConnection, prefix='hg-', base_image_name=WINDOWS_BASE_IMAGE_NAME, | c: AWSConnection, | ||||
prefix='hg-', | |||||
base_image_name=WINDOWS_BASE_IMAGE_NAME, | |||||
): | ): | ||||
"""Ensure Windows Development AMI is available and up-to-date. | """Ensure Windows Development AMI is available and up-to-date. | ||||
If necessary, a modern AMI will be built by starting a temporary EC2 | If necessary, a modern AMI will be built by starting a temporary EC2 | ||||
instance and bootstrapping it. | instance and bootstrapping it. | ||||
Obsolete AMIs will be deleted so there is only a single AMI having the | Obsolete AMIs will be deleted so there is only a single AMI having the | ||||
desired name. | desired name. | ||||
# trust issues that make it difficult to invoke Windows Update | # trust issues that make it difficult to invoke Windows Update | ||||
# remotely. So we use SSM, which has a mechanism for running Windows | # remotely. So we use SSM, which has a mechanism for running Windows | ||||
# Update. | # Update. | ||||
print('installing Windows features...') | print('installing Windows features...') | ||||
run_ssm_command( | run_ssm_command( | ||||
ssmclient, | ssmclient, | ||||
[instance], | [instance], | ||||
'AWS-RunPowerShellScript', | 'AWS-RunPowerShellScript', | ||||
{'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),}, | { | ||||
'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'), | |||||
}, | |||||
) | ) | ||||
# Reboot so all updates are fully applied. | # Reboot so all updates are fully applied. | ||||
# | # | ||||
# We don't use instance.reboot() here because it is asynchronous and | # We don't use instance.reboot() here because it is asynchronous and | ||||
# we don't know when exactly the instance has rebooted. It could take | # we don't know when exactly the instance has rebooted. It could take | ||||
# a while to stop and we may start trying to interact with the instance | # a while to stop and we may start trying to interact with the instance | ||||
# before it has rebooted. | # before it has rebooted. | ||||
print('rebooting instance %s' % instance.id) | print('rebooting instance %s' % instance.id) | ||||
instance.stop() | instance.stop() | ||||
ec2client.get_waiter('instance_stopped').wait( | ec2client.get_waiter('instance_stopped').wait( | ||||
InstanceIds=[instance.id], WaiterConfig={'Delay': 5,} | InstanceIds=[instance.id], | ||||
WaiterConfig={ | |||||
'Delay': 5, | |||||
}, | |||||
) | ) | ||||
instance.start() | instance.start() | ||||
wait_for_ip_addresses([instance]) | wait_for_ip_addresses([instance]) | ||||
# There is a race condition here between the User Data PS script running | # There is a race condition here between the User Data PS script running | ||||
# and us connecting to WinRM. This can manifest as | # and us connecting to WinRM. This can manifest as | ||||
# "AuthorizationManager check failed" failures during run_powershell(). | # "AuthorizationManager check failed" failures during run_powershell(). |
@@ ... @@
     parser = argparse.ArgumentParser()
     parser.add_argument(
         '--state-path',
         default='~/.hgautomation',
         help='Path for local state files',
     )
     parser.add_argument(
-        '--aws-region', help='AWS region to use', default='us-west-2',
+        '--aws-region',
+        help='AWS region to use',
+        default='us-west-2',
     )
     subparsers = parser.add_subparsers()
     sp = subparsers.add_parser(
-        'bootstrap-linux-dev', help='Bootstrap Linux development environments',
+        'bootstrap-linux-dev',
+        help='Bootstrap Linux development environments',
     )
     sp.add_argument(
-        '--distros', help='Comma delimited list of distros to bootstrap',
+        '--distros',
+        help='Comma delimited list of distros to bootstrap',
     )
     sp.add_argument(
         '--parallel',
         action='store_true',
         help='Generate AMIs in parallel (not CTRL-c safe)',
     )
     sp.set_defaults(func=bootstrap_linux_dev)
     sp = subparsers.add_parser(
         'bootstrap-windows-dev',
         help='Bootstrap the Windows development environment',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=bootstrap_windows_dev)
     sp = subparsers.add_parser(
-        'build-all-windows-packages', help='Build all Windows packages',
+        'build-all-windows-packages',
+        help='Build all Windows packages',
     )
     sp.add_argument(
-        '--revision', help='Mercurial revision to build', default='.',
+        '--revision',
+        help='Mercurial revision to build',
+        default='.',
     )
     sp.add_argument(
-        '--version', help='Mercurial version string to use',
+        '--version',
+        help='Mercurial version string to use',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=build_all_windows_packages)
     sp = subparsers.add_parser(
-        'build-inno', help='Build Inno Setup installer(s)',
+        'build-inno',
+        help='Build Inno Setup installer(s)',
     )
     sp.add_argument(
         '--python-version',
         help='Which version of Python to target',
         choices={2, 3},
         type=int,
         nargs='*',
         default=[3],
     )
     sp.add_argument(
         '--arch',
         help='Architecture to build for',
         choices={'x86', 'x64'},
         nargs='*',
         default=['x64'],
     )
     sp.add_argument(
-        '--revision', help='Mercurial revision to build', default='.',
+        '--revision',
+        help='Mercurial revision to build',
+        default='.',
     )
     sp.add_argument(
-        '--version', help='Mercurial version string to use in installer',
+        '--version',
+        help='Mercurial version string to use in installer',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=build_inno)
     sp = subparsers.add_parser(
-        'build-windows-wheel', help='Build Windows wheel(s)',
+        'build-windows-wheel',
+        help='Build Windows wheel(s)',
     )
     sp.add_argument(
         '--python-version',
         help='Python version to build for',
         choices={'2.7', '3.7', '3.8', '3.9'},
         nargs='*',
         default=['3.8'],
     )
     sp.add_argument(
         '--arch',
         help='Architecture to build for',
         choices={'x86', 'x64'},
         nargs='*',
         default=['x64'],
     )
     sp.add_argument(
-        '--revision', help='Mercurial revision to build', default='.',
+        '--revision',
+        help='Mercurial revision to build',
+        default='.',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=build_windows_wheel)
@@ ... @@
     sp.add_argument(
         '--arch',
         help='Architecture to build for',
         choices={'x86', 'x64'},
         nargs='*',
         default=['x64'],
     )
     sp.add_argument(
-        '--revision', help='Mercurial revision to build', default='.',
+        '--revision',
+        help='Mercurial revision to build',
+        default='.',
     )
     sp.add_argument(
-        '--version', help='Mercurial version string to use in installer',
+        '--version',
+        help='Mercurial version string to use in installer',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=build_wix)
     sp = subparsers.add_parser(
         'terminate-ec2-instances',
         help='Terminate all active EC2 instances managed by us',
     )
     sp.set_defaults(func=terminate_ec2_instances)
     sp = subparsers.add_parser(
-        'purge-ec2-resources', help='Purge all EC2 resources managed by us',
+        'purge-ec2-resources',
+        help='Purge all EC2 resources managed by us',
     )
     sp.set_defaults(func=purge_ec2_resources)
-    sp = subparsers.add_parser('run-tests-linux', help='Run tests on Linux',)
+    sp = subparsers.add_parser(
+        'run-tests-linux',
+        help='Run tests on Linux',
+    )
     sp.add_argument(
         '--distro',
         help='Linux distribution to run tests on',
         choices=linux.DISTROS,
         default='debian10',
     )
     sp.add_argument(
         '--filesystem',
@@ ... @@
     sp.add_argument(
         'test_flags',
         help='Extra command line flags to pass to run-tests.py',
         nargs='*',
     )
     sp.set_defaults(func=run_tests_linux)
     sp = subparsers.add_parser(
-        'run-tests-windows', help='Run tests on Windows',
+        'run-tests-windows',
+        help='Run tests on Windows',
     )
     sp.add_argument(
-        '--instance-type', help='EC2 instance type to use', default='t3.medium',
+        '--instance-type',
+        help='EC2 instance type to use',
+        default='t3.medium',
     )
     sp.add_argument(
         '--python-version',
         help='Python version to use',
         choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9'},
         default='2.7',
     )
     sp.add_argument(
         '--arch',
         help='Architecture to test',
         choices={'x86', 'x64'},
         default='x64',
     )
     sp.add_argument(
-        '--test-flags', help='Extra command line flags to pass to run-tests.py',
+        '--test-flags',
+        help='Extra command line flags to pass to run-tests.py',
     )
     sp.add_argument(
         '--base-image-name',
         help='AMI name of base image',
         default=aws.WINDOWS_BASE_IMAGE_NAME,
     )
     sp.set_defaults(func=run_tests_windows)
@@ ... @@
     sp.add_argument(
         '--no-mercurial-scm-org',
         dest='mercurial_scm_org',
         action='store_false',
         default=True,
         help='Skip uploading to www.mercurial-scm.org',
     )
     sp.add_argument(
-        '--ssh-username', help='SSH username for mercurial-scm.org',
+        '--ssh-username',
+        help='SSH username for mercurial-scm.org',
     )
     sp.add_argument(
-        'version', help='Mercurial version string to locate local packages',
+        'version',
+        help='Mercurial version string to locate local packages',
     )
     sp.set_defaults(func=publish_windows_artifacts)
     sp = subparsers.add_parser(
         'try', help='Run CI automation against a custom changeset'
     )
     sp.add_argument('-r', '--rev', default='.', help='Revision to run CI on')
     sp.set_defaults(func=run_try)
if arch == "x86": | if arch == "x86": | ||||
target_triple = "i686-pc-windows-msvc" | target_triple = "i686-pc-windows-msvc" | ||||
elif arch == "x64": | elif arch == "x64": | ||||
target_triple = "x86_64-pc-windows-msvc" | target_triple = "x86_64-pc-windows-msvc" | ||||
else: | else: | ||||
raise Exception("unhandled arch: %s" % arch) | raise Exception("unhandled arch: %s" % arch) | ||||
ps = BUILD_INNO_PYTHON3.format( | ps = BUILD_INNO_PYTHON3.format( | ||||
pyoxidizer_target=target_triple, version=version, | pyoxidizer_target=target_triple, | ||||
version=version, | |||||
) | ) | ||||
else: | else: | ||||
extra_args = [] | extra_args = [] | ||||
if version: | if version: | ||||
extra_args.extend(['--version', version]) | extra_args.extend(['--version', version]) | ||||
ps = get_vc_prefix(arch) + BUILD_INNO_PYTHON2.format( | ps = get_vc_prefix(arch) + BUILD_INNO_PYTHON2.format( | ||||
arch=arch, extra_args=' '.join(extra_args) | arch=arch, extra_args=' '.join(extra_args) | ||||
if arch == "x86": | if arch == "x86": | ||||
target_triple = "i686-pc-windows-msvc" | target_triple = "i686-pc-windows-msvc" | ||||
elif arch == "x64": | elif arch == "x64": | ||||
target_triple = "x86_64-pc-windows-msvc" | target_triple = "x86_64-pc-windows-msvc" | ||||
else: | else: | ||||
raise Exception("unhandled arch: %s" % arch) | raise Exception("unhandled arch: %s" % arch) | ||||
ps = BUILD_WIX_PYTHON3.format( | ps = BUILD_WIX_PYTHON3.format( | ||||
pyoxidizer_target=target_triple, version=version, | pyoxidizer_target=target_triple, | ||||
version=version, | |||||
) | ) | ||||
else: | else: | ||||
extra_args = [] | extra_args = [] | ||||
if version: | if version: | ||||
extra_args.extend(['--version', version]) | extra_args.extend(['--version', version]) | ||||
ps = get_vc_prefix(arch) + BUILD_WIX_PYTHON2.format( | ps = get_vc_prefix(arch) + BUILD_WIX_PYTHON2.format( | ||||
arch=arch, extra_args=' '.join(extra_args) | arch=arch, extra_args=' '.join(extra_args) | ||||
r'python_version must be \d.\d; got %s' % python_version | r'python_version must be \d.\d; got %s' % python_version | ||||
) | ) | ||||
if arch not in ('x86', 'x64'): | if arch not in ('x86', 'x64'): | ||||
raise ValueError('arch must be x86 or x64; got %s' % arch) | raise ValueError('arch must be x86 or x64; got %s' % arch) | ||||
python_path = 'python%s-%s' % (python_version.replace('.', ''), arch) | python_path = 'python%s-%s' % (python_version.replace('.', ''), arch) | ||||
ps = RUN_TESTS.format(python_path=python_path, test_flags=test_flags or '',) | ps = RUN_TESTS.format( | ||||
python_path=python_path, | |||||
test_flags=test_flags or '', | |||||
) | |||||
run_powershell(winrm_client, ps) | run_powershell(winrm_client, ps) | ||||
def resolve_wheel_artifacts(dist_path: pathlib.Path, version: str): | def resolve_wheel_artifacts(dist_path: pathlib.Path, version: str): | ||||
return ( | return ( | ||||
dist_path / WHEEL_FILENAME_PYTHON27_X86.format(version=version), | dist_path / WHEEL_FILENAME_PYTHON27_X86.format(version=version), | ||||
dist_path / WHEEL_FILENAME_PYTHON27_X64.format(version=version), | dist_path / WHEEL_FILENAME_PYTHON27_X64.format(version=version), |
@@ ... @@
             coldelta += 1
             continue
         # This looks like a function call.
         if t.type == token.NAME and _isop(i + 1, '('):
             fn = t.string
             # *attr() builtins don't accept byte strings to 2nd argument.
-            if fn in (
-                'getattr',
-                'setattr',
-                'hasattr',
-                'safehasattr',
-                'wrapfunction',
-                'wrapclass',
-                'addattr',
-            ) and (opts['allow-attr-methods'] or not _isop(i - 1, '.')):
+            if (
+                fn
+                in (
+                    'getattr',
+                    'setattr',
+                    'hasattr',
+                    'safehasattr',
+                    'wrapfunction',
+                    'wrapclass',
+                    'addattr',
+                )
+                and (opts['allow-attr-methods'] or not _isop(i - 1, '.'))
+            ):
                 arg1idx = _findargnofcall(1)
                 if arg1idx is not None:
                     _ensuresysstr(arg1idx)
             # .encode() and .decode() on str/bytes/unicode don't accept
             # byte strings on Python 3.
             elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                 for argn in range(2):
@@ ... @@
 cfilters = [
     (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
     (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
     (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
     (r'(\()([^)]+\))', repcallspaces),
 ]
 inutilpats = [
-    [(r'\bui\.', "don't use ui in util"),],
+    [
+        (r'\bui\.', "don't use ui in util"),
+    ],
     # warnings
     [],
 ]
 inrevlogpats = [
-    [(r'\brepo\.', "don't use repo in revlog"),],
+    [
+        (r'\brepo\.', "don't use repo in revlog"),
+    ],
     # warnings
     [],
 ]
 webtemplatefilters = []
 webtemplatepats = [
     [],
build_dir = SOURCE_DIR / "build" | build_dir = SOURCE_DIR / "build" | ||||
if pyoxidizer_target: | if pyoxidizer_target: | ||||
inno.build_with_pyoxidizer( | inno.build_with_pyoxidizer( | ||||
SOURCE_DIR, build_dir, pyoxidizer_target, iscc, version=version | SOURCE_DIR, build_dir, pyoxidizer_target, iscc, version=version | ||||
) | ) | ||||
else: | else: | ||||
inno.build_with_py2exe( | inno.build_with_py2exe( | ||||
SOURCE_DIR, build_dir, pathlib.Path(python), iscc, version=version, | SOURCE_DIR, | ||||
build_dir, | |||||
pathlib.Path(python), | |||||
iscc, | |||||
version=version, | |||||
) | ) | ||||
def build_wix( | def build_wix( | ||||
name=None, | name=None, | ||||
pyoxidizer_target=None, | pyoxidizer_target=None, | ||||
python=None, | python=None, | ||||
version=None, | version=None, |
@@ ... @@
         comment_end_string='##}',
     )
     try:
         template = jinja_env.get_template('mercurial.iss')
     except jinja2.TemplateSyntaxError as e:
         raise Exception(
             'template syntax error at %s:%d: %s'
-            % (e.name, e.lineno, e.message,)
+            % (
+                e.name,
+                e.lineno,
+                e.message,
+            )
         )
     content = template.render(package_files=package_files)
     with (inno_build_dir / 'mercurial.iss').open('w', encoding='utf-8') as fh:
         fh.write(content)
     # Copy additional files used by Inno.
@@ ... @@
     ]
     for source, rel_path in sorted((extra_wxs or {}).items()):
         assert source.endswith('.wxs')
         source = os.path.basename(source)
         args.append(str(build_dir / ('%s.wixobj' % source[:-4])))
     args.extend(
-        [str(build_dir / 'stage.wixobj'), str(build_dir / 'mercurial.wixobj'),]
+        [
+            str(build_dir / 'stage.wixobj'),
+            str(build_dir / 'mercurial.wixobj'),
+        ]
     )
     subprocess.run(args, cwd=str(source_dir), check=True)
     print('%s created' % msi_path)
     if signing_info:
         sign_with_signtool(
@@ ... @@
     )
     configitem(
         b'perf',
         b'all-timing',
         default=mercurial.configitems.dynamicdefault,
         experimental=True,
     )
     configitem(
-        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'pre-run',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
         b'perf',
         b'profile-benchmark',
         default=mercurial.configitems.dynamicdefault,
     )
     configitem(
         b'perf',
         b'run-limits',
         default=mercurial.configitems.dynamicdefault,
         experimental=True,
     )
 except (ImportError, AttributeError):
     pass
 except TypeError:
     # compatibility fix for a11fd395e83f
     # hg version: 5.2
     configitem(
-        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'presleep',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
-        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'stub',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
-        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'parentscount',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
-        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'all-timing',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
-        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'pre-run',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(
         b'perf',
         b'profile-benchmark',
         default=mercurial.configitems.dynamicdefault,
     )
     configitem(
-        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
+        b'perf',
+        b'run-limits',
+        default=mercurial.configitems.dynamicdefault,
     )
 def getlen(ui):
     if ui.configbool(b"perf", b"stub", False):
         return lambda x: 1
     return len
@@ ... @@
         fm = uiformatter(b'perf', opts)
     else:
         # for "historical portability":
         # define formatter locally, because ui.formatter has been
         # available since 2.2 (or ae5f92e154d3)
         from mercurial import node
         class defaultformatter(object):
-            """Minimized composition of baseformatter and plainformatter
-            """
+            """Minimized composition of baseformatter and plainformatter"""
             def __init__(self, ui, topic, opts):
                 self._ui = ui
                 if ui.debugflag:
                     self.hexfunc = node.hex
                 else:
                     self.hexfunc = node.short
@@ ... @@
         # doesn't)
         raise error.Abort(
             b"perfbranchmap not available with this Mercurial",
             hint=b"use 2.5 or later",
         )
 def getsvfs(repo):
-    """Return appropriate object to access files under .hg/store
-    """
+    """Return appropriate object to access files under .hg/store"""
     # for "historical portability":
     # repo.svfs has been available since 2.3 (or 7034365089bf)
     svfs = getattr(repo, 'svfs', None)
     if svfs:
         return svfs
     else:
         return getattr(repo, 'sopener')
 def getvfs(repo):
-    """Return appropriate object to access files under .hg
-    """
+    """Return appropriate object to access files under .hg"""
     # for "historical portability":
     # repo.vfs has been available since 2.3 (or 7034365089bf)
     vfs = getattr(repo, 'vfs', None)
     if vfs:
         return vfs
     else:
         return getattr(repo, 'opener')
 def repocleartagscachefunc(repo):
-    """Return the function to clear tags cache according to repo internal API
-    """
+    """Return the function to clear tags cache according to repo internal API"""
     if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
         # in this case, setattr(repo, '_tagscache', None) or so isn't
         # correct way to clear tags cache, because existing code paths
         # expect _tagscache to be a structured object.
         def clearcache():
             # _tagscache has been filteredpropertycache since 2.5 (or
             # 98c867ac1330), and delattr() can't work in such case
             if '_tagscache' in vars(repo):
@@ ... @@
     timer(d, setup=s)
     fm.end()
 @command(
     b'perftags',
     formatteropts
-    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
+    + [
+        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+    ],
 )
 def perftags(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     repocleartagscache = repocleartagscachefunc(repo)
     clearrevlogs = opts[b'clear_revlogs']
     def s():
@@ ... @@
             rev in s
     timer(d)
     fm.end()
 @command(b'perfdiscovery', formatteropts, b'PATH')
 def perfdiscovery(ui, repo, path, **opts):
-    """benchmark discovery between local repo and the peer at given path
-    """
+    """benchmark discovery between local repo and the peer at given path"""
     repos = [repo, None]
     timer, fm = gettimer(ui, opts)
     path = ui.expandpath(path)
     def s():
         repos[1] = hg.peer(ui, opts, path)
     def d():
         setdiscovery.findcommonheads(ui, *repos)
     timer(d, setup=s)
     fm.end()
 @command(
     b'perfbookmarks',
     formatteropts
-    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
+    + [
+        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+    ],
 )
 def perfbookmarks(ui, repo, **opts):
     """benchmark parsing bookmarks from disk to memory"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     clearrevlogs = opts[b'clear_revlogs']
@@ ... @@
         b"a" in repo.dirstate
     timer(d, setup=setup)
     fm.end()
 @command(b'perfdirstatedirs', formatteropts)
 def perfdirstatedirs(ui, repo, **opts):
-    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
-    """
+    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     repo.dirstate.hasdir(b"a")
     def setup():
         del repo.dirstate._map._dirs
     def d():
@@ ... @@
         dirstate._map.dirfoldmap.get(b'a')
     timer(d, setup=setup)
     fm.end()
 @command(b'perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
-    """benchmap the time it take to write a dirstate on disk
-    """
+    """benchmap the time it take to write a dirstate on disk"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ds = repo.dirstate
     b"a" in ds
     def setup():
         ds._dirty = True
@@ ... @@
         copies.pathcopies(ctx1, ctx2)
     timer(d)
     fm.end()
 @command(
     b'perfphases',
-    [(b'', b'full', False, b'include file reading time too'),],
+    [
+        (b'', b'full', False, b'include file reading time too'),
+    ],
     b"",
 )
 def perfphases(ui, repo, **opts):
     """benchmark phasesets computation"""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     _phases = repo._phasecache
     full = opts.get(b'full')
@@ ... @@
             ctx.branch()  # read changelog data (in addition to the index)
     timer(moonwalk)
     fm.end()
 @command(
     b'perftemplating',
-    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
+    [
+        (b'r', b'rev', [], b'revisions to run the template on'),
+    ]
+    + formatteropts,
 )
 def perftemplating(ui, repo, testedtemplate=None, **opts):
     """test the rendering time of a given template"""
     if makelogtemplater is None:
         raise error.Abort(
             b"perftemplating not available with this Mercurial",
             hint=b"use 4.3 or later",
         )
@@ ... @@
             data = {
                 b'source': base.hex(),
                 b'destination': parent.hex(),
                 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                 b'nbmissingfiles': len(missing),
             }
             if dostats:
                 alldata['nbrevs'].append(
-                    (data['nbrevs'], base.hex(), parent.hex(),)
+                    (
+                        data['nbrevs'],
+                        base.hex(),
+                        parent.hex(),
+                    )
                 )
                 alldata['nbmissingfiles'].append(
-                    (data['nbmissingfiles'], base.hex(), parent.hex(),)
+                    (
+                        data['nbmissingfiles'],
+                        base.hex(),
+                        parent.hex(),
+                    )
                 )
             if dotiming:
                 begin = util.timer()
                 renames = copies.pathcopies(base, parent)
                 end = util.timer()
                 # not very stable timing since we did only one run
                 data['time'] = end - begin
                 data['nbrenamedfiles'] = len(renames)
                 if dostats:
                     alldata['time'].append(
-                        (data['time'], base.hex(), parent.hex(),)
+                        (
+                            data['time'],
+                            base.hex(),
+                            parent.hex(),
+                        )
                     )
                     alldata['nbrenames'].append(
-                        (data['nbrenamedfiles'], base.hex(), parent.hex(),)
+                        (
+                            data['nbrenamedfiles'],
+                            base.hex(),
+                            parent.hex(),
+                        )
                     )
             fm.startitem()
             fm.data(**data)
             out = data.copy()
             out['source'] = fm.hexfunc(base.node())
             out['destination'] = fm.hexfunc(parent.node())
             fm.plain(output % out)
@@ ... @@
             pass
     timer(d)
     fm.end()
 @command(
     b'perfvolatilesets',
-    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
+    [
+        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
+    ]
     + formatteropts,
 )
 def perfvolatilesets(ui, repo, *names, **opts):
     """benchmark the computation of various volatile set
     Volatile set computes element related to filtering and obsolescence."""
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ ... @@
         (b'', b'nlines', 100, b'number of lines'),
         (b'', b'nitems', 100, b'number of items (per line)'),
         (b'', b'item', b'x', b'item that is written'),
         (b'', b'batch-line', None, b'pass whole line to write method at once'),
         (b'', b'flush-line', None, b'flush after each line'),
     ],
 )
 def perfwrite(ui, repo, **opts):
-    """microbenchmark ui.write (and others)
-    """
+    """microbenchmark ui.write (and others)"""
     opts = _byteskwargs(opts)
     write = getattr(ui, _sysstr(opts[b'write_method']))
     nlines = int(opts[b'nlines'])
     nitems = int(opts[b'nitems'])
     item = opts[b'item']
     batch_line = opts.get(b'batch_line')
     flush_line = opts.get(b'flush_line')
@@ ... @@
 '''
 Examples of useful python hooks for Mercurial.
 '''
 from __future__ import absolute_import
 from mercurial import (
     patch,
     util,
 )
 def diffstat(ui, repo, **kwargs):
-    '''Example usage:
+    """Example usage:
     [hooks]
     commit.diffstat = python:/path/to/this/file.py:diffstat
     changegroup.diffstat = python:/path/to/this/file.py:diffstat
-    '''
+    """
     if kwargs.get('parent2'):
         return
     node = kwargs['node']
     first = repo[node].p1().node()
     if 'url' in kwargs:
         last = repo.changelog.tip()
     else:
         last = node
     diff = patch.diff(repo, first, last)
     ui.write(patch.diffstat(util.iterlines(diff)))
"dictBuilder/divsufsort.c", | "dictBuilder/divsufsort.c", | ||||
"dictBuilder/zdict.c", | "dictBuilder/zdict.c", | ||||
) | ) | ||||
] | ] | ||||
# Headers whose preprocessed output will be fed into cdef(). | # Headers whose preprocessed output will be fed into cdef(). | ||||
HEADERS = [ | HEADERS = [ | ||||
os.path.join(HERE, "zstd", *p) | os.path.join(HERE, "zstd", *p) | ||||
for p in (("zstd.h",), ("dictBuilder", "zdict.h"),) | for p in ( | ||||
("zstd.h",), | |||||
("dictBuilder", "zdict.h"), | |||||
) | |||||
] | ] | ||||
INCLUDE_DIRS = [ | INCLUDE_DIRS = [ | ||||
os.path.join(HERE, d) | os.path.join(HERE, d) | ||||
for d in ( | for d in ( | ||||
"zstd", | "zstd", | ||||
"zstd/common", | "zstd/common", | ||||
"zstd/compress", | "zstd/compress", | ||||
if hasattr(compiler, "initialize"): | if hasattr(compiler, "initialize"): | ||||
compiler.initialize() | compiler.initialize() | ||||
# Distutils doesn't set compiler.preprocessor, so invoke the preprocessor | # Distutils doesn't set compiler.preprocessor, so invoke the preprocessor | ||||
# manually. | # manually. | ||||
if compiler.compiler_type == "unix": | if compiler.compiler_type == "unix": | ||||
args = list(compiler.executables["compiler"]) | args = list(compiler.executables["compiler"]) | ||||
args.extend( | args.extend( | ||||
["-E", "-DZSTD_STATIC_LINKING_ONLY", "-DZDICT_STATIC_LINKING_ONLY",] | [ | ||||
"-E", | |||||
"-DZSTD_STATIC_LINKING_ONLY", | |||||
"-DZDICT_STATIC_LINKING_ONLY", | |||||
] | |||||
) | ) | ||||
elif compiler.compiler_type == "msvc": | elif compiler.compiler_type == "msvc": | ||||
args = [compiler.cc] | args = [compiler.cc] | ||||
args.extend( | args.extend( | ||||
["/EP", "/DZSTD_STATIC_LINKING_ONLY", "/DZDICT_STATIC_LINKING_ONLY",] | [ | ||||
"/EP", | |||||
"/DZSTD_STATIC_LINKING_ONLY", | |||||
"/DZDICT_STATIC_LINKING_ONLY", | |||||
] | |||||
) | ) | ||||
else: | else: | ||||
raise Exception("unsupported compiler type: %s" % compiler.compiler_type) | raise Exception("unsupported compiler type: %s" % compiler.compiler_type) | ||||
def preprocess(path): | def preprocess(path): | ||||
with open(path, "rb") as fh: | with open(path, "rb") as fh: | ||||
lines = [] | lines = [] |
import argparse | import argparse | ||||
import json | import json | ||||
import os | import os | ||||
import subprocess | import subprocess | ||||
import sys | import sys | ||||
_hgenv = dict(os.environ) | _hgenv = dict(os.environ) | ||||
_hgenv.update( | _hgenv.update( | ||||
{'HGPLAIN': '1',} | { | ||||
'HGPLAIN': '1', | |||||
} | |||||
) | ) | ||||
_HG_FIRST_CHANGE = '9117c6561b0bd7792fa13b50d28239d51b78e51f' | _HG_FIRST_CHANGE = '9117c6561b0bd7792fa13b50d28239d51b78e51f' | ||||
def _runhg(*args): | def _runhg(*args): | ||||
return subprocess.check_output(args, env=_hgenv) | return subprocess.check_output(args, env=_hgenv) | ||||
[ | [ | ||||
('o', 'output', '', _('write output to given file'), _('FILE')), | ('o', 'output', '', _('write output to given file'), _('FILE')), | ||||
('r', 'rev', [], _('analyze specified revisions'), _('REV')), | ('r', 'rev', [], _('analyze specified revisions'), _('REV')), | ||||
], | ], | ||||
_('hg analyze'), | _('hg analyze'), | ||||
optionalrepo=True, | optionalrepo=True, | ||||
) | ) | ||||
def analyze(ui, repo, *revs, **opts): | def analyze(ui, repo, *revs, **opts): | ||||
'''create a simple model of a repository to use for later synthesis | """create a simple model of a repository to use for later synthesis | ||||
This command examines every changeset in the given range (or all | This command examines every changeset in the given range (or all | ||||
of history if none are specified) and creates a simple statistical | of history if none are specified) and creates a simple statistical | ||||
model of the history of the repository. It also measures the directory | model of the history of the repository. It also measures the directory | ||||
structure of the repository as checked out. | structure of the repository as checked out. | ||||
The model is written out to a JSON file, and can be used by | The model is written out to a JSON file, and can be used by | ||||
:hg:`synthesize` to create or augment a repository with synthetic | :hg:`synthesize` to create or augment a repository with synthetic | ||||
commits that have a structure that is statistically similar to the | commits that have a structure that is statistically similar to the | ||||
analyzed repository. | analyzed repository. | ||||
''' | """ | ||||
root = repo.root | root = repo.root | ||||
if not root.endswith(os.path.sep): | if not root.endswith(os.path.sep): | ||||
root += os.path.sep | root += os.path.sep | ||||
revs = list(revs) | revs = list(revs) | ||||
revs.extend(opts['rev']) | revs.extend(opts['rev']) | ||||
if not revs: | if not revs: | ||||
revs = [':'] | revs = [':'] | ||||
[ | [ | ||||
('c', 'count', 0, _('create given number of commits'), _('COUNT')), | ('c', 'count', 0, _('create given number of commits'), _('COUNT')), | ||||
('', 'dict', '', _('path to a dictionary of words'), _('FILE')), | ('', 'dict', '', _('path to a dictionary of words'), _('FILE')), | ||||
('', 'initfiles', 0, _('initial file count to create'), _('COUNT')), | ('', 'initfiles', 0, _('initial file count to create'), _('COUNT')), | ||||
], | ], | ||||
_('hg synthesize [OPTION].. DESCFILE'), | _('hg synthesize [OPTION].. DESCFILE'), | ||||
) | ) | ||||
def synthesize(ui, repo, descpath, **opts): | def synthesize(ui, repo, descpath, **opts): | ||||
'''synthesize commits based on a model of an existing repository | """synthesize commits based on a model of an existing repository | ||||
The model must have been generated by :hg:`analyze`. Commits will | The model must have been generated by :hg:`analyze`. Commits will | ||||
be generated randomly according to the probabilities described in | be generated randomly according to the probabilities described in | ||||
the model. If --initfiles is set, the repository will be seeded with | the model. If --initfiles is set, the repository will be seeded with | ||||
the given number files following the modeled repository's directory | the given number files following the modeled repository's directory | ||||
structure. | structure. | ||||
When synthesizing new content, commit descriptions, and user | When synthesizing new content, commit descriptions, and user | ||||
names, words will be chosen randomly from a dictionary that is | names, words will be chosen randomly from a dictionary that is | ||||
presumed to contain one word per line. Use --dict to specify the | presumed to contain one word per line. Use --dict to specify the | ||||
path to an alternate dictionary to use. | path to an alternate dictionary to use. | ||||
''' | """ | ||||
try: | try: | ||||
fp = hg.openpath(ui, descpath) | fp = hg.openpath(ui, descpath) | ||||
except Exception as err: | except Exception as err: | ||||
raise error.Abort('%s: %s' % (descpath, err.strerror)) | raise error.Abort('%s: %s' % (descpath, err.strerror)) | ||||
desc = json.load(fp) | desc = json.load(fp) | ||||
fp.close() | fp.close() | ||||
def cdf(l): | def cdf(l): | ||||
def renamedirs(dirs, words): | def renamedirs(dirs, words): | ||||
'''Randomly rename the directory names in the per-dir file count dict.''' | '''Randomly rename the directory names in the per-dir file count dict.''' | ||||
wordgen = itertools.cycle(words) | wordgen = itertools.cycle(words) | ||||
replacements = {'': ''} | replacements = {'': ''} | ||||
def rename(dirpath): | def rename(dirpath): | ||||
'''Recursively rename the directory and all path prefixes. | """Recursively rename the directory and all path prefixes. | ||||
The mapping from path to renamed path is stored for all path prefixes | The mapping from path to renamed path is stored for all path prefixes | ||||
as in dynamic programming, ensuring linear runtime and consistent | as in dynamic programming, ensuring linear runtime and consistent | ||||
renaming regardless of iteration order through the model. | renaming regardless of iteration order through the model. | ||||
''' | """ | ||||
if dirpath in replacements: | if dirpath in replacements: | ||||
return replacements[dirpath] | return replacements[dirpath] | ||||
head, _ = os.path.split(dirpath) | head, _ = os.path.split(dirpath) | ||||
if head: | if head: | ||||
head = rename(head) | head = rename(head) | ||||
else: | else: | ||||
head = '' | head = '' | ||||
renamed = os.path.join(head, next(wordgen)) | renamed = os.path.join(head, next(wordgen)) | ||||
replacements[dirpath] = renamed | replacements[dirpath] = renamed | ||||
return renamed | return renamed | ||||
result = [] | result = [] | ||||
for dirpath, count in dirs.iteritems(): | for dirpath, count in dirs.iteritems(): | ||||
result.append([rename(dirpath.lstrip(os.sep)), count]) | result.append([rename(dirpath.lstrip(os.sep)), count]) | ||||
return result | return result |
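Aside for anyone skimming this hunk: the rename helper above memoizes per-prefix renames so every directory maps to exactly one new name, regardless of iteration order. A minimal, self-contained sketch of the same idea (plain dicts and posixpath; the names here are illustrative, not hg API):

import itertools
import posixpath

def renamedirs_sketch(dirs, words):
    # dirs: {dirpath: file count}; words: iterable of replacement names
    wordgen = itertools.cycle(words)
    replacements = {'': ''}

    def rename(dirpath):
        if dirpath in replacements:  # each prefix is renamed exactly once
            return replacements[dirpath]
        head, _tail = posixpath.split(dirpath)
        head = rename(head) if head else ''
        renamed = posixpath.join(head, next(wordgen))
        replacements[dirpath] = renamed
        return renamed

    return [[rename(d.lstrip('/')), n] for d, n in dirs.items()]

print(renamedirs_sketch({'a/b': 2, 'a/c': 1}, ['x', 'y', 'z']))
# [['x/y', 2], ['x/z', 1]] -- 'a' maps to 'x' in both entries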
# write "data" in BYTES into stderr | # write "data" in BYTES into stderr | ||||
sys.stderr.write(data) | sys.stderr.write(data) | ||||
#################### | #################### | ||||
class embeddedmatcher(object): # pytype: disable=ignored-metaclass | class embeddedmatcher(object): # pytype: disable=ignored-metaclass | ||||
"""Base class to detect embedded code fragments in *.t test script | """Base class to detect embedded code fragments in *.t test script""" | ||||
""" | |||||
__metaclass__ = abc.ABCMeta | __metaclass__ = abc.ABCMeta | ||||
def __init__(self, desc): | def __init__(self, desc): | ||||
self.desc = desc | self.desc = desc | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def startsat(self, line): | def startsat(self, line): | ||||
"""Examine whether embedded code starts at line | """Examine whether embedded code starts at line | ||||
This can return an arbitrary object, which is used as 'ctx' for | This can return an arbitrary object, which is used as 'ctx' for | ||||
subsequent method invocations. | subsequent method invocations. | ||||
""" | """ | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def endsat(self, ctx, line): | def endsat(self, ctx, line): | ||||
"""Examine whether embedded code ends at line""" | """Examine whether embedded code ends at line""" | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def isinside(self, ctx, line): | def isinside(self, ctx, line): | ||||
"""Examine whether line is inside embedded code, if not yet endsat | """Examine whether line is inside embedded code, if not yet endsat""" | ||||
""" | |||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def ignores(self, ctx): | def ignores(self, ctx): | ||||
"""Examine whether detected embedded code should be ignored""" | """Examine whether detected embedded code should be ignored""" | ||||
@abc.abstractmethod | @abc.abstractmethod | ||||
def filename(self, ctx): | def filename(self, ctx): | ||||
"""Return filename of embedded code | """Return filename of embedded code |
def depart_line(self, node): | def depart_line(self, node): | ||||
self.body.append('\n') | self.body.append('\n') | ||||
def visit_list_item(self, node): | def visit_list_item(self, node): | ||||
# man 7 man argues to use ".IP" instead of ".TP" | # man 7 man argues to use ".IP" instead of ".TP" | ||||
self.body.append( | self.body.append( | ||||
'.IP %s %d\n' | '.IP %s %d\n' | ||||
% (next(self._list_char[-1]), self._list_char[-1].get_width(),) | % ( | ||||
next(self._list_char[-1]), | |||||
self._list_char[-1].get_width(), | |||||
) | |||||
) | ) | ||||
def depart_list_item(self, node): | def depart_list_item(self, node): | ||||
pass | pass | ||||
def visit_literal(self, node): | def visit_literal(self, node): | ||||
self.body.append(self.defs['literal'][0]) | self.body.append(self.defs['literal'][0]) | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
# deprecated config: acl.config | # deprecated config: acl.config | ||||
configitem( | configitem( | ||||
b'acl', b'config', default=None, | b'acl', | ||||
b'config', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl.groups', b'.*', default=None, generic=True, | b'acl.groups', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl.deny.branches', b'.*', default=None, generic=True, | b'acl.deny.branches', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl.allow.branches', b'.*', default=None, generic=True, | b'acl.allow.branches', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl.deny', b'.*', default=None, generic=True, | b'acl.deny', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl.allow', b'.*', default=None, generic=True, | b'acl.allow', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'acl', b'sources', default=lambda: [b'serve'], | b'acl', | ||||
b'sources', | |||||
default=lambda: [b'serve'], | |||||
) | ) | ||||
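For anyone reading these hunks without the registrar source handy: configitem(section, name, ...) only records metadata in configtable; defaults are resolved later when ui.config() is called. A rough, hand-written sketch of the pattern (not the actual registrar implementation):

configtable_demo = {}

def configitem_demo(section, name, default=None, generic=False):
    # record the item; 'generic' entries (e.g. acl.deny '.*') are treated
    # as patterns rather than literal names
    configtable_demo.setdefault(section, {})[name] = {
        'default': default,
        'generic': generic,
    }

configitem_demo('acl', 'sources', default=lambda: ['serve'])
configitem_demo('acl.deny', '.*', default=None, generic=True)
print(sorted(configtable_demo))  # ['acl', 'acl.deny']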
def _getusers(ui, group): | def _getusers(ui, group): | ||||
# First, try to use group definition from section [acl.groups] | # First, try to use group definition from section [acl.groups] | ||||
hgrcusers = ui.configlist(b'acl.groups', group) | hgrcusers = ui.configlist(b'acl.groups', group) | ||||
if hgrcusers: | if hgrcusers: |
scmutil, | scmutil, | ||||
similar, | similar, | ||||
) | ) | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'automv', b'similarity', default=95, | b'automv', | ||||
b'similarity', | |||||
default=95, | |||||
) | ) | ||||
def extsetup(ui): | def extsetup(ui): | ||||
entry = extensions.wrapcommand(commands.table, b'commit', mvcheck) | entry = extensions.wrapcommand(commands.table, b'commit', mvcheck) | ||||
entry[1].append( | entry[1].append( | ||||
(b'', b'no-automv', None, _(b'disable automatic file move detection')) | (b'', b'no-automv', None, _(b'disable automatic file move detection')) | ||||
) | ) |
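The wrapper registered here follows the usual extensions.wrapcommand convention: the wrapped function receives the original command as its first argument. A hedged sketch of mvcheck's likely shape (the body is illustrative, not the extension's actual logic):

def mvcheck_sketch(orig, ui, repo, *pats, **opts):
    # run rename detection unless --no-automv was given, then delegate
    if not opts.get('no_automv'):
        pass  # similarity-based move detection would run here
    return orig(ui, repo, *pats, **opts)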
cmdtable = {} | cmdtable = {} | ||||
command = registrar.command(cmdtable) | command = registrar.command(cmdtable) | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'blackbox', b'dirty', default=False, | b'blackbox', | ||||
b'dirty', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', b'maxsize', default=b'1 MB', | b'blackbox', | ||||
b'maxsize', | |||||
default=b'1 MB', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', b'logsource', default=False, | b'blackbox', | ||||
b'logsource', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', b'maxfiles', default=7, | b'blackbox', | ||||
b'maxfiles', | |||||
default=7, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', b'track', default=lambda: [b'*'], | b'blackbox', | ||||
b'track', | |||||
default=lambda: [b'*'], | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', | b'blackbox', | ||||
b'ignore', | b'ignore', | ||||
default=lambda: [b'chgserver', b'cmdserver', b'extension'], | default=lambda: [b'chgserver', b'cmdserver', b'extension'], | ||||
) | ) | ||||
configitem( | configitem( | ||||
b'blackbox', b'date-format', default=b'%Y/%m/%d %H:%M:%S', | b'blackbox', | ||||
b'date-format', | |||||
default=b'%Y/%m/%d %H:%M:%S', | |||||
) | ) | ||||
_lastlogger = loggingutil.proxylogger() | _lastlogger = loggingutil.proxylogger() | ||||
class blackboxlogger(object): | class blackboxlogger(object): | ||||
def __init__(self, ui, repo): | def __init__(self, ui, repo): | ||||
self._repo = repo | self._repo = repo | ||||
if _lastlogger.logger is None: | if _lastlogger.logger is None: | ||||
_lastlogger.logger = logger | _lastlogger.logger = logger | ||||
repo._wlockfreeprefix.add(b'blackbox.log') | repo._wlockfreeprefix.add(b'blackbox.log') | ||||
@command( | @command( | ||||
b'blackbox', | b'blackbox', | ||||
[(b'l', b'limit', 10, _(b'the number of events to show')),], | [ | ||||
(b'l', b'limit', 10, _(b'the number of events to show')), | |||||
], | |||||
_(b'hg blackbox [OPTION]...'), | _(b'hg blackbox [OPTION]...'), | ||||
helpcategory=command.CATEGORY_MAINTENANCE, | helpcategory=command.CATEGORY_MAINTENANCE, | ||||
helpbasic=True, | helpbasic=True, | ||||
) | ) | ||||
def blackbox(ui, repo, *revs, **opts): | def blackbox(ui, repo, *revs, **opts): | ||||
'''view the recent repository events | """view the recent repository events""" | ||||
''' | |||||
if not repo.vfs.exists(b'blackbox.log'): | if not repo.vfs.exists(b'blackbox.log'): | ||||
return | return | ||||
limit = opts.get('limit') | limit = opts.get('limit') | ||||
fp = repo.vfs(b'blackbox.log', b'r') | fp = repo.vfs(b'blackbox.log', b'r') | ||||
lines = fp.read().split(b'\n') | lines = fp.read().split(b'\n') | ||||
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'apikey', default=b'', | b'bugzilla', | ||||
b'apikey', | |||||
default=b'', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'bzdir', default=b'/var/www/html/bugzilla', | b'bugzilla', | ||||
b'bzdir', | |||||
default=b'/var/www/html/bugzilla', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'bzemail', default=None, | b'bugzilla', | ||||
b'bzemail', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'bzurl', default=b'http://localhost/bugzilla/', | b'bugzilla', | ||||
b'bzurl', | |||||
default=b'http://localhost/bugzilla/', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'bzuser', default=None, | b'bugzilla', | ||||
b'bzuser', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'db', default=b'bugs', | b'bugzilla', | ||||
b'db', | |||||
default=b'bugs', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', | b'bugzilla', | ||||
b'fixregexp', | b'fixregexp', | ||||
default=( | default=( | ||||
br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*' | br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*' | ||||
br'(?:nos?\.?|num(?:ber)?s?)?\s*' | br'(?:nos?\.?|num(?:ber)?s?)?\s*' | ||||
br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)' | br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)' | ||||
br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?' | br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?' | ||||
), | ), | ||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'fixresolution', default=b'FIXED', | b'bugzilla', | ||||
b'fixresolution', | |||||
default=b'FIXED', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'fixstatus', default=b'RESOLVED', | b'bugzilla', | ||||
b'fixstatus', | |||||
default=b'RESOLVED', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'host', default=b'localhost', | b'bugzilla', | ||||
b'host', | |||||
default=b'localhost', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'notify', default=configitem.dynamicdefault, | b'bugzilla', | ||||
b'notify', | |||||
default=configitem.dynamicdefault, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'password', default=None, | b'bugzilla', | ||||
b'password', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', | b'bugzilla', | ||||
b'regexp', | b'regexp', | ||||
default=( | default=( | ||||
br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*' | br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*' | ||||
br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)' | br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)' | ||||
br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?' | br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?' | ||||
), | ), | ||||
) | ) | ||||
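Since the regexps above are much easier to read against a concrete input, here is a quick check of the plain 'regexp' pattern together with split_re (the sample commit message is made up):

import re

bug_re = re.compile(
    br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
    br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
    br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?',
    re.IGNORECASE,
)

m = bug_re.search(b'Cleanup. Bugs 123 and 456 h 1.5')
print(re.split(br'\D+', m.group('ids').strip()))  # [b'123', b'456']
print(m.group('hours'))                           # b'1.5'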
configitem( | configitem( | ||||
b'bugzilla', b'strip', default=0, | b'bugzilla', | ||||
b'strip', | |||||
default=0, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'style', default=None, | b'bugzilla', | ||||
b'style', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'template', default=None, | b'bugzilla', | ||||
b'template', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'timeout', default=5, | b'bugzilla', | ||||
b'timeout', | |||||
default=5, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'user', default=b'bugs', | b'bugzilla', | ||||
b'user', | |||||
default=b'bugs', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'usermap', default=None, | b'bugzilla', | ||||
b'usermap', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'bugzilla', b'version', default=None, | b'bugzilla', | ||||
b'version', | |||||
default=None, | |||||
) | ) | ||||
class bzaccess(object): | class bzaccess(object): | ||||
'''Base class for access to Bugzilla.''' | '''Base class for access to Bugzilla.''' | ||||
def __init__(self, ui): | def __init__(self, ui): | ||||
self.ui = ui | self.ui = ui | ||||
def filter_real_bug_ids(self, bugs): | def filter_real_bug_ids(self, bugs): | ||||
'''remove bug IDs that do not exist in Bugzilla from bugs.''' | '''remove bug IDs that do not exist in Bugzilla from bugs.''' | ||||
def filter_cset_known_bug_ids(self, node, bugs): | def filter_cset_known_bug_ids(self, node, bugs): | ||||
'''remove bug IDs where node occurs in comment text from bugs.''' | '''remove bug IDs where node occurs in comment text from bugs.''' | ||||
def updatebug(self, bugid, newstate, text, committer): | def updatebug(self, bugid, newstate, text, committer): | ||||
'''update the specified bug. Add comment text and set new states. | """update the specified bug. Add comment text and set new states. | ||||
If possible add the comment as being from the committer of | If possible add the comment as being from the committer of | ||||
the changeset. Otherwise use the default Bugzilla user. | the changeset. Otherwise use the default Bugzilla user. | ||||
''' | """ | ||||
def notify(self, bugs, committer): | def notify(self, bugs, committer): | ||||
'''Force sending of Bugzilla notification emails. | """Force sending of Bugzilla notification emails. | ||||
Only required if the access method does not trigger notification | Only required if the access method does not trigger notification | ||||
emails automatically. | emails automatically. | ||||
''' | """ | ||||
# Bugzilla via direct access to MySQL database. | # Bugzilla via direct access to MySQL database. | ||||
class bzmysql(bzaccess): | class bzmysql(bzaccess): | ||||
'''Support for direct MySQL access to Bugzilla. | """Support for direct MySQL access to Bugzilla. | ||||
The earliest Bugzilla version this is tested with is version 2.16. | The earliest Bugzilla version this is tested with is version 2.16. | ||||
If your Bugzilla is version 3.4 or above, you are strongly | If your Bugzilla is version 3.4 or above, you are strongly | ||||
recommended to use the XMLRPC access method instead. | recommended to use the XMLRPC access method instead. | ||||
''' | """ | ||||
@staticmethod | @staticmethod | ||||
def sql_buglist(ids): | def sql_buglist(ids): | ||||
'''return SQL-friendly list of bug ids''' | '''return SQL-friendly list of bug ids''' | ||||
return b'(' + b','.join(map(str, ids)) + b')' | return b'(' + b','.join(map(str, ids)) + b')' | ||||
_MySQLdb = None | _MySQLdb = None | ||||
all = self.cursor.fetchall() | all = self.cursor.fetchall() | ||||
if len(all) != 1: | if len(all) != 1: | ||||
raise KeyError(user) | raise KeyError(user) | ||||
userid = int(all[0][0]) | userid = int(all[0][0]) | ||||
self.user_ids[user] = userid | self.user_ids[user] = userid | ||||
return userid | return userid | ||||
def get_bugzilla_user(self, committer): | def get_bugzilla_user(self, committer): | ||||
'''See if committer is a registered bugzilla user. Return | """See if committer is a registered bugzilla user. Return | ||||
bugzilla username and userid if so. If not, return default | bugzilla username and userid if so. If not, return default | ||||
bugzilla username and userid.''' | bugzilla username and userid.""" | ||||
user = self.map_committer(committer) | user = self.map_committer(committer) | ||||
try: | try: | ||||
userid = self.get_user_id(user) | userid = self.get_user_id(user) | ||||
except KeyError: | except KeyError: | ||||
try: | try: | ||||
defaultuser = self.ui.config(b'bugzilla', b'bzuser') | defaultuser = self.ui.config(b'bugzilla', b'bzuser') | ||||
if not defaultuser: | if not defaultuser: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot find bugzilla user id for %s') % user | _(b'cannot find bugzilla user id for %s') % user | ||||
) | ) | ||||
userid = self.get_user_id(defaultuser) | userid = self.get_user_id(defaultuser) | ||||
user = defaultuser | user = defaultuser | ||||
except KeyError: | except KeyError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'cannot find bugzilla user id for %s or %s') | _(b'cannot find bugzilla user id for %s or %s') | ||||
% (user, defaultuser) | % (user, defaultuser) | ||||
) | ) | ||||
return (user, userid) | return (user, userid) | ||||
def updatebug(self, bugid, newstate, text, committer): | def updatebug(self, bugid, newstate, text, committer): | ||||
'''update bug state with comment text. | """update bug state with comment text. | ||||
Try adding comment as committer of changeset, otherwise as | Try adding comment as committer of changeset, otherwise as | ||||
default bugzilla user.''' | default bugzilla user.""" | ||||
if len(newstate) > 0: | if len(newstate) > 0: | ||||
self.ui.warn(_(b"Bugzilla/MySQL cannot update bug state\n")) | self.ui.warn(_(b"Bugzilla/MySQL cannot update bug state\n")) | ||||
(user, userid) = self.get_bugzilla_user(committer) | (user, userid) = self.get_bugzilla_user(committer) | ||||
now = time.strftime('%Y-%m-%d %H:%M:%S') | now = time.strftime('%Y-%m-%d %H:%M:%S') | ||||
self.run( | self.run( | ||||
'''insert into longdescs | '''insert into longdescs | ||||
(bug_id, who, bug_when, thetext) | (bug_id, who, bug_when, thetext) | ||||
if self.bzvermajor >= 4: | if self.bzvermajor >= 4: | ||||
return b"@%s %s" % (fieldname, pycompat.bytestr(value)) | return b"@%s %s" % (fieldname, pycompat.bytestr(value)) | ||||
else: | else: | ||||
if fieldname == b"id": | if fieldname == b"id": | ||||
fieldname = b"bug_id" | fieldname = b"bug_id" | ||||
return b"@%s = %s" % (fieldname, pycompat.bytestr(value)) | return b"@%s = %s" % (fieldname, pycompat.bytestr(value)) | ||||
def send_bug_modify_email(self, bugid, commands, comment, committer): | def send_bug_modify_email(self, bugid, commands, comment, committer): | ||||
'''send modification message to Bugzilla bug via email. | """send modification message to Bugzilla bug via email. | ||||
The message format is documented in the Bugzilla email_in.pl | The message format is documented in the Bugzilla email_in.pl | ||||
specification. commands is a list of command lines, comment is the | specification. commands is a list of command lines, comment is the | ||||
comment text. | comment text. | ||||
To stop users from crafting commit comments with | To stop users from crafting commit comments with | ||||
Bugzilla commands, specify the bug ID via the message body, rather | Bugzilla commands, specify the bug ID via the message body, rather | ||||
than the subject line, and leave a blank line after it. | than the subject line, and leave a blank line after it. | ||||
''' | """ | ||||
user = self.map_committer(committer) | user = self.map_committer(committer) | ||||
matches = self.bzproxy.User.get( | matches = self.bzproxy.User.get( | ||||
{b'match': [user], b'token': self.bztoken} | {b'match': [user], b'token': self.bztoken} | ||||
) | ) | ||||
if not matches[b'users']: | if not matches[b'users']: | ||||
user = self.ui.config(b'bugzilla', b'user') | user = self.ui.config(b'bugzilla', b'user') | ||||
matches = self.bzproxy.User.get( | matches = self.bzproxy.User.get( | ||||
{b'match': [user], b'token': self.bztoken} | {b'match': [user], b'token': self.bztoken} | ||||
if any(sn in c[b'text'] for c in comments): | if any(sn in c[b'text'] for c in comments): | ||||
self.ui.status( | self.ui.status( | ||||
_(b'bug %d already knows about changeset %s\n') | _(b'bug %d already knows about changeset %s\n') | ||||
% (bugid, sn) | % (bugid, sn) | ||||
) | ) | ||||
del bugs[bugid] | del bugs[bugid] | ||||
def updatebug(self, bugid, newstate, text, committer): | def updatebug(self, bugid, newstate, text, committer): | ||||
'''update the specified bug. Add comment text and set new states. | """update the specified bug. Add comment text and set new states. | ||||
If possible add the comment as being from the committer of | If possible add the comment as being from the committer of | ||||
the changeset. Otherwise use the default Bugzilla user. | the changeset. Otherwise use the default Bugzilla user. | ||||
''' | """ | ||||
bugmod = {} | bugmod = {} | ||||
if b'hours' in newstate: | if b'hours' in newstate: | ||||
bugmod[b'work_time'] = newstate[b'hours'] | bugmod[b'work_time'] = newstate[b'hours'] | ||||
if b'fix' in newstate: | if b'fix' in newstate: | ||||
bugmod[b'status'] = self.fixstatus | bugmod[b'status'] = self.fixstatus | ||||
bugmod[b'resolution'] = self.fixresolution | bugmod[b'resolution'] = self.fixresolution | ||||
if bugmod: | if bugmod: | ||||
# if we have to change the bugs state do it here | # if we have to change the bugs state do it here | ||||
b'comment': text, | b'comment': text, | ||||
b'is_private': False, | b'is_private': False, | ||||
b'is_markdown': False, | b'is_markdown': False, | ||||
}, | }, | ||||
) | ) | ||||
self.ui.debug(b'added comment to bug %s\n' % bugid) | self.ui.debug(b'added comment to bug %s\n' % bugid) | ||||
def notify(self, bugs, committer): | def notify(self, bugs, committer): | ||||
'''Force sending of Bugzilla notification emails. | """Force sending of Bugzilla notification emails. | ||||
Only required if the access method does not trigger notification | Only required if the access method does not trigger notification | ||||
emails automatically. | emails automatically. | ||||
''' | """ | ||||
pass | pass | ||||
class bugzilla(object): | class bugzilla(object): | ||||
# supported versions of bugzilla. different versions have | # supported versions of bugzilla. different versions have | ||||
# different schemas. | # different schemas. | ||||
_versions = { | _versions = { | ||||
b'2.16': bzmysql, | b'2.16': bzmysql, | ||||
self.ui.config(b'bugzilla', b'regexp'), re.IGNORECASE | self.ui.config(b'bugzilla', b'regexp'), re.IGNORECASE | ||||
) | ) | ||||
self.fix_re = re.compile( | self.fix_re = re.compile( | ||||
self.ui.config(b'bugzilla', b'fixregexp'), re.IGNORECASE | self.ui.config(b'bugzilla', b'fixregexp'), re.IGNORECASE | ||||
) | ) | ||||
self.split_re = re.compile(br'\D+') | self.split_re = re.compile(br'\D+') | ||||
def find_bugs(self, ctx): | def find_bugs(self, ctx): | ||||
'''return bugs dictionary created from commit comment. | """return bugs dictionary created from commit comment. | ||||
Extract bug info from changeset comments. Filter out any that are | Extract bug info from changeset comments. Filter out any that are | ||||
not known to Bugzilla, and any that already have a reference to | not known to Bugzilla, and any that already have a reference to | ||||
the given changeset in their comments. | the given changeset in their comments. | ||||
''' | """ | ||||
start = 0 | start = 0 | ||||
bugs = {} | bugs = {} | ||||
bugmatch = self.bug_re.search(ctx.description(), start) | bugmatch = self.bug_re.search(ctx.description(), start) | ||||
fixmatch = self.fix_re.search(ctx.description(), start) | fixmatch = self.fix_re.search(ctx.description(), start) | ||||
while True: | while True: | ||||
bugattribs = {} | bugattribs = {} | ||||
if not bugmatch and not fixmatch: | if not bugmatch and not fixmatch: | ||||
break | break | ||||
if bugs: | if bugs: | ||||
self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs) | self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs) | ||||
return bugs | return bugs | ||||
def update(self, bugid, newstate, ctx): | def update(self, bugid, newstate, ctx): | ||||
'''update bugzilla bug with reference to changeset.''' | '''update bugzilla bug with reference to changeset.''' | ||||
def webroot(root): | def webroot(root): | ||||
'''strip leading prefix of repo root and turn into | """strip leading prefix of repo root and turn into | ||||
url-safe path.''' | url-safe path.""" | ||||
count = int(self.ui.config(b'bugzilla', b'strip')) | count = int(self.ui.config(b'bugzilla', b'strip')) | ||||
root = util.pconvert(root) | root = util.pconvert(root) | ||||
while count > 0: | while count > 0: | ||||
c = root.find(b'/') | c = root.find(b'/') | ||||
if c == -1: | if c == -1: | ||||
break | break | ||||
root = root[c + 1 :] | root = root[c + 1 :] | ||||
count -= 1 | count -= 1 | ||||
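A worked example of the strip loop above, as a standalone re-implementation (util.pconvert is approximated by a backslash replace):

def webroot_sketch(root, count):
    root = root.replace('\\', '/')  # rough stand-in for util.pconvert
    while count > 0:
        c = root.find('/')
        if c == -1:
            break
        root = root[c + 1:]
        count -= 1
    return root

print(webroot_sketch('/data/hg/my-repo', 2))  # 'hg/my-repo'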
) | ) | ||||
def notify(self, bugs, committer): | def notify(self, bugs, committer): | ||||
'''ensure Bugzilla users are notified of bug change.''' | '''ensure Bugzilla users are notified of bug change.''' | ||||
self.bzdriver.notify(bugs, committer) | self.bzdriver.notify(bugs, committer) | ||||
def hook(ui, repo, hooktype, node=None, **kwargs): | def hook(ui, repo, hooktype, node=None, **kwargs): | ||||
'''add comment to bugzilla for each changeset that refers to a | """add comment to bugzilla for each changeset that refers to a | ||||
bugzilla bug id. only add a comment once per bug, so the same change | bugzilla bug id. only add a comment once per bug, so the same change | ||||
seen multiple times does not fill the bug with duplicate data.''' | seen multiple times does not fill the bug with duplicate data.""" | ||||
if node is None: | if node is None: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'hook type %s does not pass a changeset id') % hooktype | _(b'hook type %s does not pass a changeset id') % hooktype | ||||
) | ) | ||||
try: | try: | ||||
bz = bugzilla(ui, repo) | bz = bugzilla(ui, repo) | ||||
ctx = repo[node] | ctx = repo[node] | ||||
bugs = bz.find_bugs(ctx) | bugs = bz.find_bugs(ctx) | ||||
if bugs: | if bugs: | ||||
for bug in bugs: | for bug in bugs: | ||||
bz.update(bug, bugs[bug], ctx) | bz.update(bug, bugs[bug], ctx) | ||||
bz.notify(bugs, stringutil.email(ctx.user())) | bz.notify(bugs, stringutil.email(ctx.user())) | ||||
except Exception as e: | except Exception as e: | ||||
raise error.Abort(_(b'Bugzilla error: %s') % e) | raise error.Abort(_(b'Bugzilla error: %s') % e) |
(b'', b'aliases', b'', _(b'file with email aliases'), _(b'FILE')), | (b'', b'aliases', b'', _(b'file with email aliases'), _(b'FILE')), | ||||
] | ] | ||||
+ cmdutil.walkopts, | + cmdutil.walkopts, | ||||
_(b"hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"), | _(b"hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"), | ||||
helpcategory=command.CATEGORY_MAINTENANCE, | helpcategory=command.CATEGORY_MAINTENANCE, | ||||
inferrepo=True, | inferrepo=True, | ||||
) | ) | ||||
def churn(ui, repo, *pats, **opts): | def churn(ui, repo, *pats, **opts): | ||||
'''histogram of changes to the repository | """histogram of changes to the repository | ||||
This command will display a histogram representing the number | This command will display a histogram representing the number | ||||
of changed lines or revisions, grouped according to the given | of changed lines or revisions, grouped according to the given | ||||
template. The default template will group changes by author. | template. The default template will group changes by author. | ||||
The --dateformat option may be used to group the results by | The --dateformat option may be used to group the results by | ||||
date instead. | date instead. | ||||
Statistics are based on the number of changed lines, or | Statistics are based on the number of changed lines, or | ||||
It is possible to map alternate email addresses to a main address | It is possible to map alternate email addresses to a main address | ||||
by providing a file using the following format:: | by providing a file using the following format:: | ||||
<alias email> = <actual email> | <alias email> = <actual email> | ||||
Such a file may be specified with the --aliases option, otherwise | Such a file may be specified with the --aliases option, otherwise | ||||
a .hgchurn file will be looked for in the working directory root. | a .hgchurn file will be looked for in the working directory root. | ||||
Aliases will be split from the rightmost "=". | Aliases will be split from the rightmost "=". | ||||
''' | """ | ||||
def pad(s, l): | def pad(s, l): | ||||
return s + b" " * (l - encoding.colwidth(s)) | return s + b" " * (l - encoding.colwidth(s)) | ||||
amap = {} | amap = {} | ||||
aliases = opts.get('aliases') | aliases = opts.get('aliases') | ||||
if not aliases and os.path.exists(repo.wjoin(b'.hgchurn')): | if not aliases and os.path.exists(repo.wjoin(b'.hgchurn')): | ||||
aliases = repo.wjoin(b'.hgchurn') | aliases = repo.wjoin(b'.hgchurn') |
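The rightmost-"=" rule from the docstring matters when the alias itself contains '='. A small sketch of the parse (a hypothetical helper, not the extension's code):

def parse_aliases_sketch(lines):
    amap = {}
    for line in lines:
        if '=' not in line:
            continue
        alias, actual = line.rsplit('=', 1)  # split at the rightmost '='
        amap[alias.strip()] = actual.strip()
    return amap

print(parse_aliases_sketch(['bob=laptop = bob@example.com']))
# {'bob=laptop': 'bob@example.com'}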
), | ), | ||||
# Options that are ignored for compatibility with cvsps-2.1 | # Options that are ignored for compatibility with cvsps-2.1 | ||||
(b'A', b'cvs-direct', None, _(b'ignored for compatibility')), | (b'A', b'cvs-direct', None, _(b'ignored for compatibility')), | ||||
], | ], | ||||
_(b'hg debugcvsps [OPTION]... [PATH]...'), | _(b'hg debugcvsps [OPTION]... [PATH]...'), | ||||
norepo=True, | norepo=True, | ||||
) | ) | ||||
def debugcvsps(ui, *args, **opts): | def debugcvsps(ui, *args, **opts): | ||||
'''create changeset information from CVS | """create changeset information from CVS | ||||
This command is intended as a debugging tool for the CVS to | This command is intended as a debugging tool for the CVS to | ||||
Mercurial converter, and can be used as a direct replacement for | Mercurial converter, and can be used as a direct replacement for | ||||
cvsps. | cvsps. | ||||
Hg debugcvsps reads the CVS rlog for the current directory (or any | Hg debugcvsps reads the CVS rlog for the current directory (or any | ||||
named directory) in the CVS repository, and converts the log to a | named directory) in the CVS repository, and converts the log to a | ||||
series of changesets based on matching commit log entries and | series of changesets based on matching commit log entries and | ||||
dates.''' | dates.""" | ||||
return cvsps.debugcvsps(ui, *args, **opts) | return cvsps.debugcvsps(ui, *args, **opts) | ||||
def kwconverted(context, mapping, name): | def kwconverted(context, mapping, name): | ||||
ctx = context.resource(mapping, b'ctx') | ctx = context.resource(mapping, b'ctx') | ||||
rev = ctx.extra().get(b'convert_revision', b'') | rev = ctx.extra().get(b'convert_revision', b'') | ||||
if rev.startswith(b'svn:'): | if rev.startswith(b'svn:'): | ||||
if name == b'svnrev': | if name == b'svnrev': |
demandimport, | demandimport, | ||||
error, | error, | ||||
pycompat, | pycompat, | ||||
) | ) | ||||
from . import common | from . import common | ||||
# these do not work with demandimport, blacklist | # these do not work with demandimport, blacklist | ||||
demandimport.IGNORES.update( | demandimport.IGNORES.update( | ||||
[b'bzrlib.transactions', b'bzrlib.urlutils', b'ElementPath',] | [ | ||||
b'bzrlib.transactions', | |||||
b'bzrlib.urlutils', | |||||
b'ElementPath', | |||||
] | |||||
) | ) | ||||
try: | try: | ||||
# bazaar imports | # bazaar imports | ||||
import bzrlib.bzrdir | import bzrlib.bzrdir | ||||
import bzrlib.errors | import bzrlib.errors | ||||
import bzrlib.revision | import bzrlib.revision | ||||
import bzrlib.revisionspec | import bzrlib.revisionspec |
self.ui = ui | self.ui = ui | ||||
self.path = path | self.path = path | ||||
self.revs = revs | self.revs = revs | ||||
self.repotype = repotype | self.repotype = repotype | ||||
self.encoding = b'utf-8' | self.encoding = b'utf-8' | ||||
def checkhexformat(self, revstr, mapname=b'splicemap'): | def checkhexformat(self, revstr, mapname=b'splicemap'): | ||||
""" fails if revstr is not a 40 byte hex. mercurial and git both uses | """fails if revstr is not a 40 byte hex. mercurial and git both uses | ||||
such format for their revision numbering | such format for their revision numbering | ||||
""" | """ | ||||
if not re.match(br'[0-9a-fA-F]{40,40}$', revstr): | if not re.match(br'[0-9a-fA-F]{40,40}$', revstr): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'%s entry %s is not a valid revision identifier') | _(b'%s entry %s is not a valid revision identifier') | ||||
% (mapname, revstr) | % (mapname, revstr) | ||||
) | ) | ||||
def before(self): | def before(self): | ||||
def hasnativeorder(self): | def hasnativeorder(self): | ||||
"""Return true if this source has a meaningful, native revision | """Return true if this source has a meaningful, native revision | ||||
order. For instance, Mercurial revisions are stored sequentially | order. For instance, Mercurial revisions are stored sequentially | ||||
while there is no such global ordering with Darcs. | while there is no such global ordering with Darcs. | ||||
""" | """ | ||||
return False | return False | ||||
def hasnativeclose(self): | def hasnativeclose(self): | ||||
"""Return true if this source has ability to close branch. | """Return true if this source has ability to close branch.""" | ||||
""" | |||||
return False | return False | ||||
def lookuprev(self, rev): | def lookuprev(self, rev): | ||||
"""If rev is a meaningful revision reference in source, return | """If rev is a meaningful revision reference in source, return | ||||
the referenced identifier in the same format used by getcommit(). | the referenced identifier in the same format used by getcommit(). | ||||
Return None otherwise. | Return None otherwise. | ||||
""" | """ | ||||
return None | return None | ||||
def getbookmarks(self): | def getbookmarks(self): | ||||
"""Return the bookmarks as a dictionary of name: revision | """Return the bookmarks as a dictionary of name: revision | ||||
Bookmark names are to be UTF-8 strings. | Bookmark names are to be UTF-8 strings. | ||||
""" | """ | ||||
return {} | return {} | ||||
def checkrevformat(self, revstr, mapname=b'splicemap'): | def checkrevformat(self, revstr, mapname=b'splicemap'): | ||||
"""revstr is a string that describes a revision in the given | """revstr is a string that describes a revision in the given | ||||
source control system. Return true if revstr has correct | source control system. Return true if revstr has correct | ||||
format. | format. | ||||
""" | """ | ||||
return True | return True | ||||
class converter_sink(object): | class converter_sink(object): | ||||
"""Conversion sink (target) interface""" | """Conversion sink (target) interface""" | ||||
def __init__(self, ui, repotype, path): | def __init__(self, ui, repotype, path): |
return s.encode(pycompat.sysstr(orig_encoding), 'replace') | return s.encode(pycompat.sysstr(orig_encoding), 'replace') | ||||
else: | else: | ||||
return s.decode('utf-8').encode( | return s.decode('utf-8').encode( | ||||
pycompat.sysstr(orig_encoding), 'replace' | pycompat.sysstr(orig_encoding), 'replace' | ||||
) | ) | ||||
def mapbranch(branch, branchmap): | def mapbranch(branch, branchmap): | ||||
''' | """ | ||||
>>> bmap = {b'default': b'branch1'} | >>> bmap = {b'default': b'branch1'} | ||||
>>> for i in [b'', None]: | >>> for i in [b'', None]: | ||||
... mapbranch(i, bmap) | ... mapbranch(i, bmap) | ||||
'branch1' | 'branch1' | ||||
'branch1' | 'branch1' | ||||
>>> bmap = {b'None': b'branch2'} | >>> bmap = {b'None': b'branch2'} | ||||
>>> for i in [b'', None]: | >>> for i in [b'', None]: | ||||
... mapbranch(i, bmap) | ... mapbranch(i, bmap) | ||||
'branch2' | 'branch2' | ||||
'branch2' | 'branch2' | ||||
>>> bmap = {b'None': b'branch3', b'default': b'branch4'} | >>> bmap = {b'None': b'branch3', b'default': b'branch4'} | ||||
>>> for i in [b'None', b'', None, b'default', b'branch5']: | >>> for i in [b'None', b'', None, b'default', b'branch5']: | ||||
... mapbranch(i, bmap) | ... mapbranch(i, bmap) | ||||
'branch3' | 'branch3' | ||||
'branch4' | 'branch4' | ||||
'branch4' | 'branch4' | ||||
'branch4' | 'branch4' | ||||
'branch5' | 'branch5' | ||||
''' | """ | ||||
# If branch is None or empty, this commit is coming from the source | # If branch is None or empty, this commit is coming from the source | ||||
# repository's default branch and destined for the default branch in the | # repository's default branch and destined for the default branch in the | ||||
# destination repository. For such commits, using a literal "default" | # destination repository. For such commits, using a literal "default" | ||||
# in branchmap below allows the user to map "default" to an alternate | # in branchmap below allows the user to map "default" to an alternate | ||||
# default branch in the destination repository. | # default branch in the destination repository. | ||||
branch = branchmap.get(branch or b'default', branch) | branch = branchmap.get(branch or b'default', branch) | ||||
# At some point we used "None" literal to denote the default branch, | # At some point we used "None" literal to denote the default branch, | ||||
# attempt to use that for backward compatibility. | # attempt to use that for backward compatibility. | ||||
if opts.get(b'authormap'): | if opts.get(b'authormap'): | ||||
self.readauthormap(opts.get(b'authormap')) | self.readauthormap(opts.get(b'authormap')) | ||||
self.authorfile = self.dest.authorfile() | self.authorfile = self.dest.authorfile() | ||||
self.splicemap = self.parsesplicemap(opts.get(b'splicemap')) | self.splicemap = self.parsesplicemap(opts.get(b'splicemap')) | ||||
self.branchmap = mapfile(ui, opts.get(b'branchmap')) | self.branchmap = mapfile(ui, opts.get(b'branchmap')) | ||||
def parsesplicemap(self, path): | def parsesplicemap(self, path): | ||||
""" check and validate the splicemap format and | """check and validate the splicemap format and | ||||
return a child/parents dictionary. | return a child/parents dictionary. | ||||
Format checking has two parts. | Format checking has two parts. | ||||
1. generic format, which is the same across all source types | 1. generic format, which is the same across all source types | ||||
2. specific format checking which may be different for | 2. specific format checking which may be different for | ||||
different source types. This logic is implemented in | different source types. This logic is implemented in | ||||
checkrevformat function in source files like | checkrevformat function in source files like | ||||
hg.py, subversion.py etc. | hg.py, subversion.py etc. | ||||
""" | """ | ||||
if not path: | if not path: | ||||
return {} | return {} | ||||
m = {} | m = {} | ||||
try: | try: | ||||
fp = open(path, b'rb') | fp = open(path, b'rb') | ||||
for i, line in enumerate(util.iterfile(fp)): | for i, line in enumerate(util.iterfile(fp)): | ||||
# if file does not exist or error reading, exit | # if file does not exist or error reading, exit | ||||
except IOError: | except IOError: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'splicemap file not found or error reading %s:') % path | _(b'splicemap file not found or error reading %s:') % path | ||||
) | ) | ||||
return m | return m | ||||
def walktree(self, heads): | def walktree(self, heads): | ||||
'''Return a mapping that identifies the uncommitted parents of every | """Return a mapping that identifies the uncommitted parents of every | ||||
uncommitted changeset.''' | uncommitted changeset.""" | ||||
visit = list(heads) | visit = list(heads) | ||||
known = set() | known = set() | ||||
parents = {} | parents = {} | ||||
numcommits = self.source.numcommits() | numcommits = self.source.numcommits() | ||||
progress = self.ui.makeprogress( | progress = self.ui.makeprogress( | ||||
_(b'scanning'), unit=_(b'revisions'), total=numcommits | _(b'scanning'), unit=_(b'revisions'), total=numcommits | ||||
) | ) | ||||
while visit: | while visit: | ||||
continue | continue | ||||
# Parent is not in dest and not being converted, not good | # Parent is not in dest and not being converted, not good | ||||
if p not in parents: | if p not in parents: | ||||
raise error.Abort(_(b'unknown splice map parent: %s') % p) | raise error.Abort(_(b'unknown splice map parent: %s') % p) | ||||
pc.append(p) | pc.append(p) | ||||
parents[c] = pc | parents[c] = pc | ||||
def toposort(self, parents, sortmode): | def toposort(self, parents, sortmode): | ||||
'''Return an ordering such that every uncommitted changeset is | """Return an ordering such that every uncommitted changeset is | ||||
preceded by all its uncommitted ancestors.''' | preceded by all its uncommitted ancestors.""" | ||||
def mapchildren(parents): | def mapchildren(parents): | ||||
"""Return a (children, roots) tuple where 'children' maps parent | """Return a (children, roots) tuple where 'children' maps parent | ||||
revision identifiers to children ones, and 'roots' is the list of | revision identifiers to children ones, and 'roots' is the list of | ||||
revisions without parents. 'parents' must be a mapping of revision | revisions without parents. 'parents' must be a mapping of revision | ||||
identifier to its parents ones. | identifier to its parents ones. | ||||
""" | """ | ||||
visit = collections.deque(sorted(parents)) | visit = collections.deque(sorted(parents)) |
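The docstring above describes a straightforward graph inversion; a self-contained sketch of that contract:

def mapchildren_sketch(parents):
    # parents: {rev: [parent revs]} -> ({parent: [children]}, roots)
    children = {}
    roots = []
    for rev in sorted(parents):
        ps = parents[rev]
        if not ps:
            roots.append(rev)
        for p in ps:
            children.setdefault(p, []).append(rev)
    return children, roots

print(mapchildren_sketch({'a': [], 'b': ['a'], 'c': ['a']}))
# ({'a': ['b', 'c']}, ['a'])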
procutil, | procutil, | ||||
stringutil, | stringutil, | ||||
) | ) | ||||
pickle = util.pickle | pickle = util.pickle | ||||
class logentry(object): | class logentry(object): | ||||
'''Class logentry has the following attributes: | """Class logentry has the following attributes: | ||||
.author - author name as CVS knows it | .author - author name as CVS knows it | ||||
.branch - name of branch this revision is on | .branch - name of branch this revision is on | ||||
.branches - revision tuple of branches starting at this revision | .branches - revision tuple of branches starting at this revision | ||||
.comment - commit message | .comment - commit message | ||||
.commitid - CVS commitid or None | .commitid - CVS commitid or None | ||||
.date - the commit date as a (time, tz) tuple | .date - the commit date as a (time, tz) tuple | ||||
.dead - true if file revision is dead | .dead - true if file revision is dead | ||||
.file - Name of file | .file - Name of file | ||||
.lines - a tuple (+lines, -lines) or None | .lines - a tuple (+lines, -lines) or None | ||||
.parent - Previous revision of this entry | .parent - Previous revision of this entry | ||||
.rcs - name of file as returned from CVS | .rcs - name of file as returned from CVS | ||||
.revision - revision number as tuple | .revision - revision number as tuple | ||||
.tags - list of tags on the file | .tags - list of tags on the file | ||||
.synthetic - is this a synthetic "file ... added on ..." revision? | .synthetic - is this a synthetic "file ... added on ..." revision? | ||||
.mergepoint - the branch that has been merged from (if present in | .mergepoint - the branch that has been merged from (if present in | ||||
rlog output) or None | rlog output) or None | ||||
.branchpoints - the branches that start at the current entry or empty | .branchpoints - the branches that start at the current entry or empty | ||||
''' | """ | ||||
def __init__(self, **entries): | def __init__(self, **entries): | ||||
self.synthetic = False | self.synthetic = False | ||||
self.__dict__.update(entries) | self.__dict__.update(entries) | ||||
def __repr__(self): | def __repr__(self): | ||||
items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)) | items = ("%s=%r" % (k, self.__dict__[k]) for k in sorted(self.__dict__)) | ||||
return "%s(%s)" % (type(self).__name__, ", ".join(items)) | return "%s(%s)" % (type(self).__name__, ", ".join(items)) | ||||
) | ) | ||||
hook.hook(ui, None, b"cvslog", True, log=log) | hook.hook(ui, None, b"cvslog", True, log=log) | ||||
return log | return log | ||||
class changeset(object): | class changeset(object): | ||||
'''Class changeset has the following attributes: | """Class changeset has the following attributes: | ||||
.id - integer identifying this changeset (list index) | .id - integer identifying this changeset (list index) | ||||
.author - author name as CVS knows it | .author - author name as CVS knows it | ||||
.branch - name of branch this changeset is on, or None | .branch - name of branch this changeset is on, or None | ||||
.comment - commit message | .comment - commit message | ||||
.commitid - CVS commitid or None | .commitid - CVS commitid or None | ||||
.date - the commit date as a (time,tz) tuple | .date - the commit date as a (time,tz) tuple | ||||
.entries - list of logentry objects in this changeset | .entries - list of logentry objects in this changeset | ||||
.parents - list of one or two parent changesets | .parents - list of one or two parent changesets | ||||
.tags - list of tags on this changeset | .tags - list of tags on this changeset | ||||
.synthetic - from synthetic revision "file ... added on branch ..." | .synthetic - from synthetic revision "file ... added on branch ..." | ||||
.mergepoint - the branch that has been merged from or None | .mergepoint - the branch that has been merged from or None | ||||
.branchpoints - the branches that start at the current entry or empty | .branchpoints - the branches that start at the current entry or empty | ||||
''' | """ | ||||
def __init__(self, **entries): | def __init__(self, **entries): | ||||
self.id = None | self.id = None | ||||
self.synthetic = False | self.synthetic = False | ||||
self.__dict__.update(entries) | self.__dict__.update(entries) | ||||
def __repr__(self): | def __repr__(self): | ||||
items = ( | items = ( | ||||
ui.status(_(b'%d changeset entries\n') % len(changesets)) | ui.status(_(b'%d changeset entries\n') % len(changesets)) | ||||
hook.hook(ui, None, b"cvschangesets", True, changesets=changesets) | hook.hook(ui, None, b"cvschangesets", True, changesets=changesets) | ||||
return changesets | return changesets | ||||
def debugcvsps(ui, *args, **opts): | def debugcvsps(ui, *args, **opts): | ||||
'''Read CVS rlog for current directory or named path in | """Read CVS rlog for current directory or named path in | ||||
repository, and convert the log to changesets based on matching | repository, and convert the log to changesets based on matching | ||||
commit log entries and dates. | commit log entries and dates. | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
if opts[b"new_cache"]: | if opts[b"new_cache"]: | ||||
cache = b"write" | cache = b"write" | ||||
elif opts[b"update_cache"]: | elif opts[b"update_cache"]: | ||||
cache = b"update" | cache = b"update" | ||||
else: | else: | ||||
cache = None | cache = None | ||||
pycompat, | pycompat, | ||||
) | ) | ||||
from . import common | from . import common | ||||
SKIPREV = common.SKIPREV | SKIPREV = common.SKIPREV | ||||
def rpairs(path): | def rpairs(path): | ||||
'''Yield tuples with path split at '/', starting with the full path. | """Yield tuples with path split at '/', starting with the full path. | ||||
No leading, trailing or double '/', please. | No leading, trailing or double '/', please. | ||||
>>> for x in rpairs(b'foo/bar/baz'): print(x) | >>> for x in rpairs(b'foo/bar/baz'): print(x) | ||||
('foo/bar/baz', '') | ('foo/bar/baz', '') | ||||
('foo/bar', 'baz') | ('foo/bar', 'baz') | ||||
('foo', 'bar/baz') | ('foo', 'bar/baz') | ||||
('.', 'foo/bar/baz') | ('.', 'foo/bar/baz') | ||||
''' | """ | ||||
i = len(path) | i = len(path) | ||||
while i != -1: | while i != -1: | ||||
yield path[:i], path[i + 1 :] | yield path[:i], path[i + 1 :] | ||||
i = path.rfind(b'/', 0, i) | i = path.rfind(b'/', 0, i) | ||||
yield b'.', path | yield b'.', path | ||||
def normalize(path): | def normalize(path): | ||||
''' We use posixpath.normpath to support cross-platform path format. | """We use posixpath.normpath to support cross-platform path format. | ||||
However, it doesn't handle None input. So we wrap it up. ''' | However, it doesn't handle None input. So we wrap it up.""" | ||||
if path is None: | if path is None: | ||||
return None | return None | ||||
return posixpath.normpath(path) | return posixpath.normpath(path) | ||||
class filemapper(object): | class filemapper(object): | ||||
'''Map and filter filenames when importing. | """Map and filter filenames when importing. | ||||
A name can be mapped to itself, a new name, or None (omit from new | A name can be mapped to itself, a new name, or None (omit from new | ||||
repository).''' | repository).""" | ||||
def __init__(self, ui, path=None): | def __init__(self, ui, path=None): | ||||
self.ui = ui | self.ui = ui | ||||
self.include = {} | self.include = {} | ||||
self.exclude = {} | self.exclude = {} | ||||
self.rename = {} | self.rename = {} | ||||
self.targetprefixes = None | self.targetprefixes = None | ||||
if path: | if path: |
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'eol', b'fix-trailing-newline', default=False, | b'eol', | ||||
b'fix-trailing-newline', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'eol', b'native', default=pycompat.oslinesep, | b'eol', | ||||
b'native', | |||||
default=pycompat.oslinesep, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'eol', b'only-consistent', default=True, | b'eol', | ||||
b'only-consistent', | |||||
default=True, | |||||
) | ) | ||||
# Matches a lone LF, i.e., one that is not part of CRLF. | # Matches a lone LF, i.e., one that is not part of CRLF. | ||||
singlelf = re.compile(b'(^|[^\r])\n') | singlelf = re.compile(b'(^|[^\r])\n') | ||||
def inconsistenteol(data): | def inconsistenteol(data): | ||||
return b'\r\n' in data and singlelf.search(data) | return b'\r\n' in data and singlelf.search(data) |
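The lone-LF regex is subtle enough to deserve a sanity check; inconsistenteol is true only when CRLF and a bare LF both occur in the same data:

import re

singlelf = re.compile(b'(^|[^\r])\n')  # an LF not preceded by CR

def inconsistenteol_demo(data):
    return b'\r\n' in data and singlelf.search(data) is not None

print(inconsistenteol_demo(b'a\r\nb\n'))    # True: mixed endings
print(inconsistenteol_demo(b'a\r\nb\r\n'))  # False: consistent CRLF
print(inconsistenteol_demo(b'a\nb\n'))      # False: no CRLF at all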
cmdtable = {} | cmdtable = {} | ||||
command = registrar.command(cmdtable) | command = registrar.command(cmdtable) | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'extdiff', br'opts\..*', default=b'', generic=True, | b'extdiff', | ||||
br'opts\..*', | |||||
default=b'', | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'extdiff', br'gui\..*', generic=True, | b'extdiff', | ||||
br'gui\..*', | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'diff-tools', br'.*\.diffargs$', default=None, generic=True, | b'diff-tools', | ||||
br'.*\.diffargs$', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'diff-tools', br'.*\.gui$', generic=True, | b'diff-tools', | ||||
br'.*\.gui$', | |||||
generic=True, | |||||
) | ) | ||||
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | ||||
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | ||||
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
def snapshot(ui, repo, files, node, tmproot, listsubrepos): | def snapshot(ui, repo, files, node, tmproot, listsubrepos): | ||||
'''snapshot files as of some revision | """snapshot files as of some revision | ||||
if not using a snapshot, -I/-X do not work and recursive diff | if not using a snapshot, -I/-X do not work and recursive diff | ||||
in tools like kdiff3 and meld displays too many files.''' | in tools like kdiff3 and meld displays too many files.""" | ||||
dirname = os.path.basename(repo.root) | dirname = os.path.basename(repo.root) | ||||
if dirname == b"": | if dirname == b"": | ||||
dirname = b"root" | dirname = b"root" | ||||
if node is not None: | if node is not None: | ||||
dirname = b'%s.%s' % (dirname, short(node)) | dirname = b'%s.%s' % (dirname, short(node)) | ||||
base = os.path.join(tmproot, dirname) | base = os.path.join(tmproot, dirname) | ||||
os.mkdir(base) | os.mkdir(base) | ||||
fnsandstat = [] | fnsandstat = [] | ||||
br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' | ||||
) | ) | ||||
if not do3way and not re.search(regex, cmdline): | if not do3way and not re.search(regex, cmdline): | ||||
cmdline += b' $parent1 $child' | cmdline += b' $parent1 $child' | ||||
return re.sub(regex, quote, cmdline) | return re.sub(regex, quote, cmdline) | ||||
def _systembackground(cmd, environ=None, cwd=None): | def _systembackground(cmd, environ=None, cwd=None): | ||||
''' like 'procutil.system', but returns the Popen object directly | """like 'procutil.system', but returns the Popen object directly | ||||
so we don't have to wait on it. | so we don't have to wait on it. | ||||
''' | """ | ||||
env = procutil.shellenviron(environ) | env = procutil.shellenviron(environ) | ||||
proc = subprocess.Popen( | proc = subprocess.Popen( | ||||
procutil.tonativestr(cmd), | procutil.tonativestr(cmd), | ||||
shell=True, | shell=True, | ||||
close_fds=procutil.closefds, | close_fds=procutil.closefds, | ||||
env=procutil.tonativeenv(env), | env=procutil.tonativeenv(env), | ||||
cwd=pycompat.rapply(procutil.tonativestr, cwd), | cwd=pycompat.rapply(procutil.tonativestr, cwd), | ||||
) | ) | ||||
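The point of returning the Popen object is deferred reaping; in plain subprocess terms the pattern looks like this (illustrative, not the extdiff code path):

import subprocess

# launch without blocking, do other work, then wait for the child
proc = subprocess.Popen('echo per-file diff', shell=True)
# ... the next snapshot could be prepared here while the tool runs ...
ret = proc.wait()
print('external program exited with', ret)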
b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) | b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) | ||||
) | ) | ||||
util.copyfile(copy_fn, working_fn) | util.copyfile(copy_fn, working_fn) | ||||
return 1 | return 1 | ||||
 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
-    '''Do the actual diff:
+    """Do the actual diff:
     - copy to a temp structure if diffing 2 internal revisions
     - copy to a temp structure if diffing working revision with
       another one and more than 1 file is changed
     - just invoke the diff for a single file in the working dir
-    '''
+    """
     cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
     revs = opts.get(b'rev')
     change = opts.get(b'change')
     do3way = b'$parent2' in cmdline
     if change:
         ctx2 = scmutil.revsingle(repo, change, None)
     ]
     + cmdutil.walkopts
     + cmdutil.subrepoopts
 )
 @command(
     b'extdiff',
-    [(b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),]
+    [
+        (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),
+    ]
     + extdiffopts,
     _(b'hg extdiff [OPT]... [FILE]...'),
     helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True,
 )
 def extdiff(ui, repo, *pats, **opts):
-    '''use external program to diff repository (or selected files)
+    """use external program to diff repository (or selected files)
     Show differences between revisions for the specified files, using
     an external program. The default program used is diff, with
     default options "-Npru".
     To select a different program, use the -p/--program option. The
     program will be passed the names of two directories to compare,
     unless the --per-file option is specified (see below). To pass
     external program only once the previous external program (for the
     previous file diff) has exited. If the external program has a
     graphical interface, it can open all the file diffs at once instead
     of one by one. See :hg:`help -e extdiff` for information about how
     to tell Mercurial that a given program has a graphical interface.
     The --confirm option will prompt the user before each invocation of
     the external program. It is ignored if --per-file isn't specified.
-    '''
+    """
     opts = pycompat.byteskwargs(opts)
     program = opts.get(b'program')
     option = opts.get(b'option')
     if not program:
         program = b'diff'
         option = option or [b'-Npru']
     cmdline = b' '.join(map(procutil.shellquote, [program] + option))
     return dodiff(ui, repo, cmdline, pats, opts)
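Black never re-joins an exploded call on its own: as long as the trailing comma is present, the one-per-line layout is preserved on every reformat. Where the compact form is preferred, the trailing comma has to be removed by hand, after which black collapses the call if it fits. A sketch (the 80-column limit and hypothetical f() call are assumptions):

    import black

    mode = black.FileMode(line_length=80, string_normalization=False)

    exploded = "f(\n    b'section',\n    b'name',\n    default=None,\n)\n"
    # The magic trailing comma keeps the exploded layout stable:
    assert black.format_str(exploded, mode=mode) == exploded

    # Drop the trailing comma and black joins the call onto one line:
    compact = "f(\n    b'section',\n    b'name',\n    default=None\n)\n"
    print(black.format_str(compact, mode=mode))
    # f(b'section', b'name', default=None)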
 ERRMAX = 128
 _executable = _mountpoint = _service = None
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'factotum', b'executable', default=b'/bin/auth/factotum',
+    b'factotum',
+    b'executable',
+    default=b'/bin/auth/factotum',
 )
 configitem(
-    b'factotum', b'mountpoint', default=b'/mnt/factotum',
+    b'factotum',
+    b'mountpoint',
+    default=b'/mnt/factotum',
 )
 configitem(
-    b'factotum', b'service', default=b'hg',
+    b'factotum',
+    b'service',
+    default=b'hg',
 )
 def auth_getkey(self, params):
     if not self.ui.interactive():
         raise error.Abort(_(b'factotum not interactive'))
     if b'user=' not in params:
         params = b'%s user?' % params
     ]
     + cmdutil.commitopts
     + cmdutil.commitopts2
     + cmdutil.remoteopts,
     _(b'hg fetch [SOURCE]'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
 )
 def fetch(ui, repo, source=b'default', **opts):
-    '''pull changes from a remote repository, merge new changes if needed.
+    """pull changes from a remote repository, merge new changes if needed.
     This finds all changes from the repository at the specified path
     or URL and adds them to the local repository.
     If the pulled changes add a new branch head, the head is
     automatically merged, and the result of the merge is committed.
     Otherwise, the working directory is updated to include the new
     changes.
     When a merge is needed, the working directory is first updated to
     the newly pulled changes. Local changes are then merged into the
     pulled changes. To switch the merge order, use --switch-parent.
     See :hg:`help dates` for a list of formats valid for -d/--date.
     Returns 0 on success.
-    '''
+    """
     opts = pycompat.byteskwargs(opts)
     date = opts.get(b'date')
     if date:
         opts[b'date'] = dateutil.parsedate(date)
     parent = repo.dirstate.p1()
     branch = repo.dirstate.branch()
""" | """ | ||||
replacements = { | replacements = { | ||||
prec: [succ] for prec, succ in pycompat.iteritems(replacements) | prec: [succ] for prec, succ in pycompat.iteritems(replacements) | ||||
} | } | ||||
scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True) | scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True) | ||||
def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): | def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): | ||||
""""Constructs the list of files to be fixed at specific revisions | """ "Constructs the list of files to be fixed at specific revisions | ||||
It is up to the caller how to consume the work items, and the only | It is up to the caller how to consume the work items, and the only | ||||
dependence between them is that replacement revisions must be committed in | dependence between them is that replacement revisions must be committed in | ||||
topological order. Each work item represents a file in the working copy or | topological order. Each work item represents a file in the working copy or | ||||
in some revision that should be fixed and written back to the working copy | in some revision that should be fixed and written back to the working copy | ||||
or into a replacement revision. | or into a replacement revision. | ||||
Work items for the same revision are grouped together, so that a worker | Work items for the same revision are grouped together, so that a worker |
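This hunk is an edge case rather than a style choice: the docstring's text itself begins with a double quote, so the old source opened with four " characters in a row. The newer black pads the opening delimiter with a space whenever the docstring body starts (or ends) with the quote character, keeping the delimiter unambiguous. The leading quote before "Constructs" looks like a pre-existing typo, but it is docstring content, so the formatter has to preserve it. A sketch:

    # Docstring whose content starts with a quote character:
    def before():
        """"quoted" word first"""

    # black pads the delimiter instead of emitting four quotes in a row:
    def after():
        """ "quoted" word first"""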
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'fsmonitor', b'mode', default=b'on',
+    b'fsmonitor',
+    b'mode',
+    default=b'on',
 )
 configitem(
-    b'fsmonitor', b'walk_on_invalidate', default=False,
+    b'fsmonitor',
+    b'walk_on_invalidate',
+    default=False,
 )
 configitem(
-    b'fsmonitor', b'timeout', default=b'2',
+    b'fsmonitor',
+    b'timeout',
+    default=b'2',
 )
 configitem(
-    b'fsmonitor', b'blacklistusers', default=list,
+    b'fsmonitor',
+    b'blacklistusers',
+    default=list,
 )
 configitem(
-    b'fsmonitor', b'watchman_exe', default=b'watchman',
+    b'fsmonitor',
+    b'watchman_exe',
+    default=b'watchman',
 )
 configitem(
-    b'fsmonitor', b'verbose', default=True, experimental=True,
+    b'fsmonitor',
+    b'verbose',
+    default=True,
+    experimental=True,
 )
 configitem(
-    b'experimental', b'fsmonitor.transaction_notify', default=False,
+    b'experimental',
+    b'fsmonitor.transaction_notify',
+    default=False,
 )
 # This extension is incompatible with the following blacklisted extensions
 # and will disable itself when encountering one of these:
 _blacklist = [b'largefiles', b'eol']
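For readers outside the tree: all of these hunks reformat the same registration boilerplate. registrar.configitem(configtable) returns a function, and each configitem(section, name, default=...) call records one known config option for the extension. A compressed illustration of the pattern; the function body here is a stand-in, not the real mercurial.registrar implementation:

    # Illustrative stand-in for the registrar pattern above.
    configtable = {}

    def configitem(section, name, default=None, **opts):
        # Record one known config option under its section.
        configtable.setdefault(section, {})[name] = (default, opts)

    configitem(
        b'fsmonitor',
        b'mode',
        default=b'on',
    )
    assert configtable[b'fsmonitor'][b'mode'][0] == b'on'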
 def debuginstall(ui, fm):
         encoded = decoded.encode(_fsencoding, 'strict')
     except UnicodeEncodeError as e:
         raise error.Abort(stringutil.forcebytestr(e))
     return encoded
 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
-    '''Replacement for dirstate.walk, hooking into Watchman.
+    """Replacement for dirstate.walk, hooking into Watchman.
     Whenever full is False, ignored is False, and the Watchman client is
     available, use Watchman combined with saved state to possibly return only a
-    subset of files.'''
+    subset of files."""
     def bail(reason):
         self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
         return orig(match, subrepos, unknown, ignored, full=True)
     if full:
         return bail(b'full rewalk requested')
     if ignored:
     if pycompat.isdarwin:
         # An assist for avoiding the dangling-symlink fsevents bug
         extensions.wrapfunction(os, b'symlink', wrapsymlink)
     extensions.wrapfunction(merge, b'_update', wrapupdate)
 def wrapsymlink(orig, source, link_name):
-    ''' if we create a dangling symlink, also touch the parent dir
-    to encourage fsevents notifications to work more correctly '''
+    """if we create a dangling symlink, also touch the parent dir
+    to encourage fsevents notifications to work more correctly"""
     try:
         return orig(source, link_name)
     finally:
         try:
             os.utime(os.path.dirname(link_name), None)
         except OSError:
             pass
 class state_update(object):
-    ''' This context manager is responsible for dispatching the state-enter
+    """This context manager is responsible for dispatching the state-enter
     and state-leave signals to the watchman service. The enter and leave
     methods can be invoked manually (for scenarios where context manager
     semantics are not possible). If parameters oldnode and newnode are None,
     they will be populated based on current working copy in enter and
     leave, respectively. Similarly, if the distance is none, it will be
-    calculated based on the oldnode and newnode in the leave method.'''
+    calculated based on the oldnode and newnode in the leave method."""
     def __init__(
         self,
         repo,
         name,
         oldnode=None,
         newnode=None,
         distance=None,
"unable to connect to %s: %s" % (sockpath, exc) | "unable to connect to %s: %s" % (sockpath, exc) | ||||
) | ) | ||||
self.sockpath = sockpath | self.sockpath = sockpath | ||||
self.exc = exc | self.exc = exc | ||||
class SocketTimeout(WatchmanError): | class SocketTimeout(WatchmanError): | ||||
"""A specialized exception raised for socket timeouts during communication to/from watchman. | """A specialized exception raised for socket timeouts during communication to/from watchman. | ||||
This makes it easier to implement non-blocking loops as callers can easily distinguish | This makes it easier to implement non-blocking loops as callers can easily distinguish | ||||
between a routine timeout and an actual error condition. | between a routine timeout and an actual error condition. | ||||
Note that catching WatchmanError will also catch this as it is a super-class, so backwards | Note that catching WatchmanError will also catch this as it is a super-class, so backwards | ||||
compatibility in exception handling is preserved. | compatibility in exception handling is preserved. | ||||
""" | """ | ||||
class CommandError(WatchmanError): | class CommandError(WatchmanError): | ||||
"""error returned by watchman | """error returned by watchman | ||||
self.msg is the message returned by watchman. | self.msg is the message returned by watchman. | ||||
""" | """ | ||||
def write(self, buf): | def write(self, buf): | ||||
""" write some data """ | """ write some data """ | ||||
raise NotImplementedError() | raise NotImplementedError() | ||||
def setTimeout(self, value): | def setTimeout(self, value): | ||||
pass | pass | ||||
def readLine(self): | def readLine(self): | ||||
""" read a line | """read a line | ||||
Maintains its own buffer, callers of the transport should not mix | Maintains its own buffer, callers of the transport should not mix | ||||
calls to readBytes and readLine. | calls to readBytes and readLine. | ||||
""" | """ | ||||
if self.buf is None: | if self.buf is None: | ||||
self.buf = [] | self.buf = [] | ||||
# Buffer may already have a line if we've received unilateral | # Buffer may already have a line if we've received unilateral | ||||
# response(s) from the server | # response(s) from the server | ||||
def write(self, data): | def write(self, data): | ||||
try: | try: | ||||
self.sock.sendall(data) | self.sock.sendall(data) | ||||
except socket.timeout: | except socket.timeout: | ||||
raise SocketTimeout("timed out sending query command") | raise SocketTimeout("timed out sending query command") | ||||
def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable): | def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable): | ||||
""" Windows 7 and earlier does not support GetOverlappedResultEx. The | """Windows 7 and earlier does not support GetOverlappedResultEx. The | ||||
alternative is to use GetOverlappedResult and wait for read or write | alternative is to use GetOverlappedResult and wait for read or write | ||||
operation to complete. This is done be using CreateEvent and | operation to complete. This is done be using CreateEvent and | ||||
WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx | WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx | ||||
and GetOverlappedResult are all part of Windows API since WindowsXP. | and GetOverlappedResult are all part of Windows API since WindowsXP. | ||||
This is the exact same implementation that can be found in the watchman | This is the exact same implementation that can be found in the watchman | ||||
source code (see get_overlapped_result_ex_impl in stream_win.c). This | source code (see get_overlapped_result_ex_impl in stream_win.c). This | ||||
way, maintenance should be simplified. | way, maintenance should be simplified. | ||||
""" | """ | ||||
         CloseHandle(self._waitable)
         self._waitable = None
     def setTimeout(self, value):
         # convert to milliseconds
         self.timeout = int(value * 1000)
     def readBytes(self, size):
-        """ A read can block for an unbounded amount of time, even if the
+        """A read can block for an unbounded amount of time, even if the
         kernel reports that the pipe handle is signalled, so we need to
         always perform our reads asynchronously
         """
         # try to satisfy the read from any buffered data
         if self._iobuf:
             if size >= len(self._iobuf):
                 res = self._iobuf
                 self.buf = None
                 return res
         return binpath
     # The test harness sets WATCHMAN_BINARY to the binary under test,
     # so we use that by default, otherwise, allow resolving watchman
     # from the users PATH.
     return os.environ.get("WATCHMAN_BINARY", "watchman")
 class CLIProcessTransport(Transport):
-    """ open a pipe to the cli to talk to the service
+    """open a pipe to the cli to talk to the service
     This intended to be used only in the test harness!
     The CLI is an oddball because we only support JSON input
     and cannot send multiple commands through the same instance,
     so we spawn a new process for each command.
     We disable server spawning for this implementation, again, because
     it is intended to be used only in our test harness. You really
         raise WatchmanError("watchman response decode error: %s" % e)
     def send(self, *args):
         cmd = bser.dumps(*args)  # Defaults to BSER v1
         self.transport.write(cmd)
 class ImmutableBserCodec(BserCodec):
-    """ use the BSER encoding, decoding values using the newer
-    immutable object support """
+    """use the BSER encoding, decoding values using the newer
+    immutable object support"""
     def _loads(self, response):
         return bser.loads(
             response,
             False,
             value_encoding=self._value_encoding,
             value_errors=self._value_errors,
         )
                 capabilities=self.bser_capabilities
             )
         else:
             cmd = bser.dumps(*args)
         self.transport.write(cmd)
 class ImmutableBser2Codec(Bser2WithFallbackCodec, ImmutableBserCodec):
-    """ use the BSER encoding, decoding values using the newer
-    immutable object support """
+    """use the BSER encoding, decoding values using the newer
+    immutable object support"""
     pass
 class JsonCodec(Codec):
     """ Use json codec. This is here primarily for testing purposes """
     json = None
     def close(self):
         if self.tport:
             self.tport.close()
             self.tport = None
         self.recvConn = None
         self.sendConn = None
     def receive(self):
-        """ receive the next PDU from the watchman service
+        """receive the next PDU from the watchman service
         If the client has activated subscriptions or logs then
         this PDU may be a unilateral PDU sent by the service to
         inform the client of a log event or subscription change.
         It may also simply be the response portion of a request
         initiated by query.
             return True
         # Fall back to checking for known unilateral responses
         for k in self.unilateral:
             if k in res:
                 return True
         return False
     def getLog(self, remove=True):
-        """ Retrieve buffered log data
+        """Retrieve buffered log data
         If remove is true the data will be removed from the buffer.
         Otherwise it will be left in the buffer
         """
         res = self.logs
         if remove:
             self.logs = []
         return res
     def getSubscription(self, name, remove=True, root=None):
-        """ Retrieve the data associated with a named subscription
+        """Retrieve the data associated with a named subscription
         If remove is True (the default), the subscription data is removed
         from the buffer. Otherwise the data is returned but left in
         the buffer.
         Returns None if there is no data associated with `name`
         If root is not None, then only return the subscription
         if name not in self.subs:
             return None
         sub = self.subs[name]
         if remove:
             del self.subs[name]
         return sub
     def query(self, *args):
-        """ Send a query to the watchman service and return the response
+        """Send a query to the watchman service and return the response
         This call will block until the response is returned.
         If any unilateral responses are sent by the service in between
         the request-response they will be buffered up in the client object
         and NOT returned via this method.
         """
         log("calling client.query")
 def check(version, name):
     if name in cap_versions:
         return version >= parse_version(cap_versions[name])
     return False
 def synthesize(vers, opts):
-    """ Synthesize a capability enabled version response
+    """Synthesize a capability enabled version response
     This is a very limited emulation for relatively recent feature sets
     """
     parsed_version = parse_version(vers["version"])
     vers["capabilities"] = {}
     for name in opts["optional"]:
         vers["capabilities"][name] = check(parsed_version, name)
     for name in opts["required"]:
         have = check(parsed_version, name)
         vers["capabilities"][name] = have
         if not have:
             vers["error"] = (
                 "client required capability `"
                 + name
                 + "` is not supported by this server"
             )
     return vers
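Concretely, synthesize fakes the capabilities block that a newer watchman server would have produced, using only the reported version string. A worked example; the capability names and version cutoffs are illustrative:

    # Assume cap_versions = {"term-dirname": "3.1", "wildmatch": "4.7"}.
    vers = {"version": "4.0"}
    opts = {"optional": ["wildmatch"], "required": ["term-dirname"]}
    # synthesize(vers, opts) would yield:
    #   vers["capabilities"] == {"wildmatch": False, "term-dirname": True}
    # and set no "error" key, because the one required capability is present.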
     gitutil,
     index,
 )
 configtable = {}
 configitem = registrar.configitem(configtable)
 # git.log-index-cache-miss: internal knob for testing
 configitem(
-    b"git", b"log-index-cache-miss", default=False,
+    b"git",
+    b"log-index-cache-miss",
+    default=False,
 )
 # TODO: extract an interface for this in core
 class gitstore(object): # store.basicstore):
     def __init__(self, path, vfstype):
         self.vfs = vfstype(path)
         self.path = self.vfs.base
         self.createmode = store._calcmode(self.vfs)
     def expandname(self, bname):
         if bname == b'.':
             if self.active:
                 return self.active
             raise error.RepoLookupError(_(b"no active bookmark"))
         return bname
     def applychanges(self, repo, tr, changes):
-        """Apply a list of changes to bookmarks
-        """
+        """Apply a list of changes to bookmarks"""
         # TODO: this should respect transactions, but that's going to
         # require enlarging the gitbmstore to know how to do in-memory
         # temporary writes and read those back prior to transaction
         # finalization.
         for name, node in changes:
             if node is None:
                 self.gitrepo.references.delete(
                     _BMS_PREFIX + pycompat.fsdecode(name)
     @util.propertycache
     def _dirs(self):
         return pathutil.dirs(self)
     def hasdir(self, dir):
         return dir in self._dirs
     def diff(self, other, match=lambda x: True, clean=False):
-        '''Finds changes between the current manifest and m2.
+        """Finds changes between the current manifest and m2.
         The result is returned as a dict with filename as key and
         values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
         nodeid in the current/other manifest and fl1/fl2 is the flag
         in the current/other manifest. Where the file does not exist,
         the nodeid will be None and the flags will be the empty
         string.
-        '''
+        """
         result = {}
         def _iterativediff(t1, t2, subdir):
             """compares two trees and appends new tree nodes to examine to
             the stack"""
             if t1 is None:
                 t1 = {}
             if t2 is None:
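The return shape that docstring specifies, as a small worked example (node values abbreviated and hypothetical):

    # current manifest: {'a.txt': (n1, ''), 'b.txt': (n2, 'x')}
    # other manifest:   {'a.txt': (n1, ''), 'b.txt': (n3, '')}
    #
    # self.diff(other) reports only the entry that differs:
    #   {'b.txt': ((n2, 'x'), (n3, ''))}
    #
    # A file present on one side only gets None and '' for the missing side:
    #   {'new.txt': ((None, ''), (n4, ''))}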
 @command(
     b'githelp|git',
     [],
     _(b'hg githelp'),
     helpcategory=command.CATEGORY_HELP,
     helpbasic=True,
 )
 def githelp(ui, repo, *args, **kwargs):
-    '''suggests the Mercurial equivalent of the given git command
+    """suggests the Mercurial equivalent of the given git command
     Usage: hg githelp -- <git command>
-    '''
+    """
     if len(args) == 0 or (len(args) == 1 and args[0] == b'git'):
         raise error.Abort(
             _(b'missing git command - usage: hg githelp -- <git command>')
         )
     if args[0] == b'git':
         args = args[1:]
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'gpg', b'cmd', default=b'gpg',
+    b'gpg',
+    b'cmd',
+    default=b'gpg',
 )
 configitem(
-    b'gpg', b'key', default=None,
+    b'gpg',
+    b'key',
+    default=None,
 )
 configitem(
-    b'gpg', b'.*', default=None, generic=True,
+    b'gpg',
+    b'.*',
+    default=None,
+    generic=True,
 )
 # Custom help category
 _HELP_CATEGORY = b'gpg'
 help.CATEGORY_ORDER.insert(
     help.CATEGORY_ORDER.index(registrar.command.CATEGORY_HELP), _HELP_CATEGORY
 )
 help.CATEGORY_NAMES[_HELP_CATEGORY] = b'Signing changes (GPG)'
             fp.write(sig)
             fp.close()
             fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt")
             fp = os.fdopen(fd, 'wb')
             fp.write(data)
             fp.close()
             gpgcmd = (
                 b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\""
-                % (self.path, sigfile, datafile,)
+                % (
+                    self.path,
+                    sigfile,
+                    datafile,
+                )
             )
             ret = procutil.filter(b"", gpgcmd)
         finally:
             for f in (sigfile, datafile):
                 try:
                     if f:
                         os.unlink(f)
                 except OSError:
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'hgk', b'path', default=b'hgk',
+    b'hgk',
+    b'path',
+    default=b'hgk',
 )
 @command(
     b'debug-diff-tree',
     [
         (b'p', b'patch', None, _(b'generate patch')),
         (b'r', b'recursive', None, _(b'recursive')),
 pickle = util.pickle
 cmdtable = {}
 command = registrar.command(cmdtable)
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'experimental', b'histedit.autoverb', default=False,
+    b'experimental',
+    b'histedit.autoverb',
+    default=False,
 )
 configitem(
-    b'histedit', b'defaultrev', default=None,
+    b'histedit',
+    b'defaultrev',
+    default=None,
 )
 configitem(
-    b'histedit', b'dropmissing', default=False,
+    b'histedit',
+    b'dropmissing',
+    default=False,
 )
 configitem(
-    b'histedit', b'linelen', default=80,
+    b'histedit',
+    b'linelen',
+    default=80,
 )
 configitem(
-    b'histedit', b'singletransaction', default=False,
+    b'histedit',
+    b'singletransaction',
+    default=False,
 )
 configitem(
-    b'ui', b'interface.histedit', default=None,
+    b'ui',
+    b'interface.histedit',
+    default=None,
 )
 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 actiontable = {}
 primaryactions = set()
 secondaryactions = set()
 tertiaryactions = set()
 internalactions = set()
 def geteditcomment(ui, first, last):
-    """ construct the editor comment
+    """construct the editor comment
     The comment includes::
     - an intro
     - sorted primary commands
     - sorted short commands
     - sorted long commands
     - additional hints
     Commands are only included once.
 class histeditaction(object):
     def __init__(self, state, node):
         self.state = state
         self.repo = state.repo
         self.node = node
     @classmethod
     def fromrule(cls, state, rule):
-        """Parses the given rule, returning an instance of the histeditaction.
-        """
+        """Parses the given rule, returning an instance of the histeditaction."""
         ruleid = rule.strip().split(b' ', 1)[0]
         # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
         # Check for validation of rule ids and get the rulehash
         try:
             rev = node.bin(ruleid)
         except TypeError:
             try:
                 _ctx = scmutil.revsingle(state.repo, ruleid)
         # trim to 75 columns by default so it's not stupidly wide in my editor
         # (the 5 more are left for verb)
         maxlen = self.repo.ui.configint(b'histedit', b'linelen')
         maxlen = max(maxlen, 22) # avoid truncating hash
         return stringutil.ellipsis(line, maxlen)
     def tostate(self):
         """Print an action in format used by histedit state files
         (the first line is a verb, the remainder is the second)
         """
         return b"%s\n%s" % (self.verb, node.hex(self.node))
     def run(self):
         """Runs the action. The default behavior is simply apply the action's
         rulectx onto the current parentctx."""
         self.applychange()
         self.continuedirty()
         if other in self.conflicts:
             self.conflicts.remove(other)
         return self.conflicts
 # ============ EVENTS ===============
 def movecursor(state, oldpos, newpos):
-    '''Change the rule/changeset that the cursor is pointing to, regardless of
-    current mode (you can switch between patches from the view patch window).'''
+    """Change the rule/changeset that the cursor is pointing to, regardless of
+    current mode (you can switch between patches from the view patch window)."""
     state[b'pos'] = newpos
     mode, _ = state[b'mode']
     if mode == MODE_RULES:
         # Scroll through the list by updating the view for MODE_RULES, so that
         # even if we are not currently viewing the rules, switching back will
         # result in the cursor's rule being visible.
         modestate = state[b'modes'][MODE_RULES]
     if next:
         index += 1
     else:
         index -= 1
     changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
 def changeview(state, delta, unit):
-    '''Change the region of whatever is being viewed (a patch or the list of
-    changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.'''
+    """Change the region of whatever is being viewed (a patch or the list of
+    changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
     mode, _ = state[b'mode']
     if mode != MODE_PATCH:
         return
     mode_state = state[b'modes'][mode]
     num_lines = len(mode_state[b'patchcontents'])
     page_height = state[b'page_height']
     unit = page_height if unit == b'page' else 1
     num_pages = 1 + (num_lines - 1) // page_height
     state = {
         b'pos': 0,
         b'rules': rules,
         b'selected': None,
         b'mode': (MODE_INIT, MODE_INIT),
         b'page_height': None,
         b'modes': {
-            MODE_RULES: {b'line_offset': 0,},
-            MODE_PATCH: {b'line_offset': 0,},
+            MODE_RULES: {
+                b'line_offset': 0,
+            },
+            MODE_PATCH: {
+                b'line_offset': 0,
+            },
         },
         b'repo': repo,
     }
     # eventloop
     ch = None
     stdscr.clear()
     stdscr.refresh()
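The nested explosion above cascades from the innermost trailing commas: once black explodes the inner {b'line_offset': 0,} dicts, every enclosing literal stays multi-line as well. Without the inner commas, the same structure would collapse as far as the line length allows, e.g. (sketch, assuming an 80-column limit):

    modes = {
        'rules': {'line_offset': 0},
        'patch': {'line_offset': 0},
    }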
 )
 from mercurial.utils import dateutil
 from .. import notify
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'notify_obsoleted', b'domain', default=None,
+    b'notify_obsoleted',
+    b'domain',
+    default=None,
 )
 configitem(
-    b'notify_obsoleted', b'messageidseed', default=None,
+    b'notify_obsoleted',
+    b'messageidseed',
+    default=None,
 )
 configitem(
     b'notify_obsoleted',
     b'template',
     default=b'''Subject: changeset abandoned
 This changeset has been abandoned.
 ''',
 )
 from mercurial.utils import dateutil
 from .. import notify
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'notify_published', b'domain', default=None,
+    b'notify_published',
+    b'domain',
+    default=None,
 )
 configitem(
-    b'notify_published', b'messageidseed', default=None,
+    b'notify_published',
+    b'messageidseed',
+    default=None,
 )
 configitem(
     b'notify_published',
     b'template',
     default=b'''Subject: changeset published
 This changeset has been published.
 ''',
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'
 configtable = {}
 configitem = registrar.configitem(configtable)
 configitem(
-    b'infinitepush', b'server', default=False,
+    b'infinitepush',
+    b'server',
+    default=False,
 )
 configitem(
-    b'infinitepush', b'storetype', default=b'',
+    b'infinitepush',
+    b'storetype',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'indextype', default=b'',
+    b'infinitepush',
+    b'indextype',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'indexpath', default=b'',
+    b'infinitepush',
+    b'indexpath',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'storeallparts', default=False,
+    b'infinitepush',
+    b'storeallparts',
+    default=False,
 )
 configitem(
-    b'infinitepush', b'reponame', default=b'',
+    b'infinitepush',
+    b'reponame',
+    default=b'',
 )
 configitem(
-    b'scratchbranch', b'storepath', default=b'',
+    b'scratchbranch',
+    b'storepath',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'branchpattern', default=b'',
+    b'infinitepush',
+    b'branchpattern',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'pushtobundlestore', default=False,
+    b'infinitepush',
+    b'pushtobundlestore',
+    default=False,
 )
 configitem(
-    b'experimental', b'server-bundlestore-bookmark', default=b'',
+    b'experimental',
+    b'server-bundlestore-bookmark',
+    default=b'',
 )
 configitem(
-    b'experimental', b'infinitepush-scratchpush', default=False,
+    b'experimental',
+    b'infinitepush-scratchpush',
+    default=False,
 )
 experimental = b'experimental'
 configbookmark = b'server-bundlestore-bookmark'
 configscratchpush = b'infinitepush-scratchpush'
 scratchbranchparttype = bundleparts.scratchbranchparttype
 revsetpredicate = registrar.revsetpredicate()
     loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
     numeric_loglevel = getattr(logging, loglevel.upper(), None)
     if not isinstance(numeric_loglevel, int):
         raise error.Abort(_(b'invalid log level %s') % loglevel)
     return numeric_loglevel
 def _tryhoist(ui, remotebookmark):
-    '''returns a bookmarks with hoisted part removed
+    """returns a bookmarks with hoisted part removed
     Remotenames extension has a 'hoist' config that allows to use remote
     bookmarks without specifying remote path. For example, 'hg update master'
     works as well as 'hg update remote/master'. We want to allow the same in
     infinitepush.
-    '''
+    """
     if common.isremotebooksenabled(ui):
         hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
         if remotebookmark.startswith(hoist):
             return remotebookmark[len(hoist) :]
     return remotebookmark
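A worked example of the hoisting described in that docstring, with a hypothetical 'remote' peer configured as the hoisted peer:

    # hoist == b'remote/'
    # _tryhoist(ui, b'remote/master')  -> b'master'
    # _tryhoist(ui, b'other/master')   -> b'other/master' (unchanged)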
yield pushkey.decodekeys(d) | yield pushkey.decodekeys(d) | ||||
def _readbundlerevs(bundlerepo): | def _readbundlerevs(bundlerepo): | ||||
return list(bundlerepo.revs(b'bundle()')) | return list(bundlerepo.revs(b'bundle()')) | ||||
def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): | def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): | ||||
'''Tells remotefilelog to include all changed files to the changegroup | """Tells remotefilelog to include all changed files to the changegroup | ||||
By default remotefilelog doesn't include file content to the changegroup. | By default remotefilelog doesn't include file content to the changegroup. | ||||
But we need to include it if we are fetching from bundlestore. | But we need to include it if we are fetching from bundlestore. | ||||
''' | """ | ||||
changedfiles = set() | changedfiles = set() | ||||
cl = bundlerepo.changelog | cl = bundlerepo.changelog | ||||
for r in bundlerevs: | for r in bundlerevs: | ||||
# [3] means changed files | # [3] means changed files | ||||
changedfiles.update(cl.read(r)[3]) | changedfiles.update(cl.read(r)[3]) | ||||
if not changedfiles: | if not changedfiles: | ||||
return bundlecaps | return bundlecaps | ||||
if not appended: | if not appended: | ||||
# Not found excludepattern cap. Just append it | # Not found excludepattern cap. Just append it | ||||
newcaps.append(b'excludepattern=' + changedfiles) | newcaps.append(b'excludepattern=' + changedfiles) | ||||
return newcaps | return newcaps | ||||
def _rebundle(bundlerepo, bundleroots, unknownhead): | def _rebundle(bundlerepo, bundleroots, unknownhead): | ||||
''' | """ | ||||
Bundle may include more revision then user requested. For example, | Bundle may include more revision then user requested. For example, | ||||
if user asks for revision but bundle also consists its descendants. | if user asks for revision but bundle also consists its descendants. | ||||
This function will filter out all revision that user is not requested. | This function will filter out all revision that user is not requested. | ||||
''' | """ | ||||
parts = [] | parts = [] | ||||
version = b'02' | version = b'02' | ||||
outgoing = discovery.outgoing( | outgoing = discovery.outgoing( | ||||
bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead] | bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead] | ||||
) | ) | ||||
cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull') | cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull') | ||||
cgstream = util.chunkbuffer(cgstream).read() | cgstream = util.chunkbuffer(cgstream).read() | ||||
def _needsrebundling(head, bundlerepo): | def _needsrebundling(head, bundlerepo): | ||||
bundleheads = list(bundlerepo.revs(b'heads(bundle())')) | bundleheads = list(bundlerepo.revs(b'heads(bundle())')) | ||||
return not ( | return not ( | ||||
len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head | len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head | ||||
) | ) | ||||
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): | def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): | ||||
'''generates bundle that will be send to the user | """generates bundle that will be send to the user | ||||
returns tuple with raw bundle string and bundle type | returns tuple with raw bundle string and bundle type | ||||
''' | """ | ||||
parts = [] | parts = [] | ||||
if not _needsrebundling(head, bundlerepo): | if not _needsrebundling(head, bundlerepo): | ||||
with util.posixfile(bundlefile, b"rb") as f: | with util.posixfile(bundlefile, b"rb") as f: | ||||
unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) | unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) | ||||
if isinstance(unbundler, changegroup.cg1unpacker): | if isinstance(unbundler, changegroup.cg1unpacker): | ||||
part = bundle2.bundlepart( | part = bundle2.bundlepart( | ||||
b'changegroup', data=unbundler._stream.read() | b'changegroup', data=unbundler._stream.read() | ||||
) | ) | ||||
if part.type in (b'pushkey', b'changegroup'): | if part.type in (b'pushkey', b'changegroup'): | ||||
if op.reply is not None: | if op.reply is not None: | ||||
rpart = op.reply.newpart(b'reply:%s' % part.type) | rpart = op.reply.newpart(b'reply:%s' % part.type) | ||||
rpart.addparam( | rpart.addparam( | ||||
b'in-reply-to', b'%d' % part.id, mandatory=False | b'in-reply-to', b'%d' % part.id, mandatory=False | ||||
) | ) | ||||
rpart.addparam(b'return', b'1', mandatory=False) | rpart.addparam(b'return', b'1', mandatory=False) | ||||
op.records.add(part.type, {b'return': 1,}) | op.records.add( | ||||
part.type, | |||||
{ | |||||
b'return': 1, | |||||
}, | |||||
) | |||||
if bundlepart: | if bundlepart: | ||||
bundler.addpart(bundlepart) | bundler.addpart(bundlepart) | ||||
# storing the bundle in the bundlestore | # storing the bundle in the bundlestore | ||||
buf = util.chunkbuffer(bundler.getchunks()) | buf = util.chunkbuffer(bundler.getchunks()) | ||||
fd, bundlefile = pycompat.mkstemp() | fd, bundlefile = pycompat.mkstemp() | ||||
try: | try: | ||||
try: | try: | ||||
rpart.addparam( | rpart.addparam( | ||||
b'in-reply-to', str(part.id), mandatory=False | b'in-reply-to', str(part.id), mandatory=False | ||||
) | ) | ||||
rpart.addparam(b'return', b'1', mandatory=False) | rpart.addparam(b'return', b'1', mandatory=False) | ||||
else: | else: | ||||
bundle2._processpart(op, part) | bundle2._processpart(op, part) | ||||
if handleallparts: | if handleallparts: | ||||
op.records.add(part.type, {b'return': 1,}) | op.records.add( | ||||
part.type, | |||||
{ | |||||
b'return': 1, | |||||
}, | |||||
) | |||||
             if bundlepart:
                 bundler.addpart(bundlepart)

         # If commits were sent, store them
         if cgparams:
             buf = util.chunkbuffer(bundler.getchunks())
             fd, bundlefile = pycompat.mkstemp()
             try:

                 b'old': oldnode,
             }
             op.reply.newpart(
                 b'pushkey', mandatoryparams=pycompat.iteritems(params)
             )

 def bundle2pushkey(orig, op, part):
-    '''Wrapper of bundle2.handlepushkey()
+    """Wrapper of bundle2.handlepushkey()
     The only goal is to skip calling the original function if flag is set.
     It's set if infinitepush push is happening.
-    '''
+    """
     if op.records[scratchbranchparttype + b'_skippushkey']:
         if op.reply is not None:
             rpart = op.reply.newpart(b'reply:pushkey')
             rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
             rpart.addparam(b'return', b'1', mandatory=False)
         return 1
     return orig(op, part)

 def bundle2handlephases(orig, op, part):
-    '''Wrapper of bundle2.handlephases()
+    """Wrapper of bundle2.handlephases()
     The only goal is to skip calling the original function if flag is set.
     It's set if infinitepush push is happening.
-    '''
+    """
     if op.records[scratchbranchparttype + b'_skipphaseheads']:
         return
     return orig(op, part)

 def _asyncsavemetadata(root, nodes):
-    '''starts a separate process that fills metadata for the nodes
+    """starts a separate process that fills metadata for the nodes
     This function creates a separate process and doesn't wait for it's
     completion. This was done to avoid slowing down pushes
-    '''
+    """
     maxnodes = 50
     if len(nodes) > maxnodes:
         return
     nodesargs = []
     for node in nodes:
         nodesargs.append(b'--node')
         nodesargs.append(node)
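The docstring hunks repeat one mechanical rewrite throughout: '''-quoted docstrings become the PEP 257-preferred """, closing quotes are joined onto the text when the docstring fits on one line, and stray spaces next to the quotes are stripped. A synthetic illustration:

def f():
    '''one-line summary.
    '''

becomes:

def f():
    """one-line summary."""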
     heads = repo.revs(b'heads(%r)', revset)
     if len(heads) > 1:
         raise error.Abort(
             _(b'cannot push more than one head to a scratch branch')
         )

 def _handlelfs(repo, missing):
-    '''Special case if lfs is enabled
+    """Special case if lfs is enabled
     If lfs is enabled then we need to call prepush hook
     to make sure large files are uploaded to lfs
-    '''
+    """
     try:
         lfsmod = extensions.find(b'lfs')
         lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
     except KeyError:
         # Ignore if lfs extension is not enabled
         return

         raise NotImplementedError()

     def addmanybookmarks(self, bookmarks):
         """Takes a dict with mapping from bookmark to hash and records mapping
         in the metadata store."""
         raise NotImplementedError()

     def deletebookmarks(self, patterns):
-        """Accepts list of bookmarks and deletes them.
-        """
+        """Accepts list of bookmarks and deletes them."""
         raise NotImplementedError()

     def getbundle(self, node):
         """Returns the bundleid for the bundle that contains the given node."""
         raise NotImplementedError()

     def getnode(self, bookmark):
         """Returns the node for the given bookmark. None if it doesn't exist."""
     pattern = pattern.replace(b'_', b'\\_')
     pattern = pattern.replace(b'%', b'\\%')
     if pattern.endswith(b'*'):
         pattern = pattern[:-1] + b'%'
     return pattern
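(Context for reviewers: the unchanged lines above are the tail of the helper that maps bookmark glob patterns onto SQL LIKE syntax; literal `_` and `%` are escaped and a trailing `*` becomes the LIKE wildcard `%`. A self-contained sketch of that behavior; the function name here is illustrative, not necessarily what sqlindexapi.py calls it:

def _convertpattern(pattern):
    # escape characters that LIKE treats as wildcards
    pattern = pattern.replace(b'_', b'\\_')
    pattern = pattern.replace(b'%', b'\\%')
    # a trailing glob '*' becomes the LIKE wildcard '%'
    if pattern.endswith(b'*'):
        pattern = pattern[:-1] + b'%'
    return pattern

assert _convertpattern(b'infinitepush/*') == b'infinitepush/%'
assert _convertpattern(b'my_bookmark') == b'my\\_bookmark'
)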
 class sqlindexapi(indexapi.indexapi):
-    '''
+    """
     Sql backend for infinitepush index. See schema.sql
-    '''
+    """

     def __init__(
         self,
         reponame,
         host,
         port,
         database,
         user,
 }

 templatefilter = registrar.templatefilter()

 configtable = {}
 configitem = registrar.configitem(configtable)

 configitem(
-    b'keywordset', b'svn', default=False,
+    b'keywordset',
+    b'svn',
+    default=False,
 )

 # date like in cvs' $Date
 @templatefilter(b'utcdate', intype=templateutil.date)
 def utcdate(date):
-    '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
-    '''
+    """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
     dateformat = b'%Y/%m/%d %H:%M:%S'
     return dateutil.datestr((date[0], 0), dateformat)

 # date like in svn's $Date
 @templatefilter(b'svnisodate', intype=templateutil.date)
 def svnisodate(date):
-    '''Date. Returns a date in this format: "2009-08-18 13:00:13
+    """Date. Returns a date in this format: "2009-08-18 13:00:13
     +0200 (Tue, 18 Aug 2009)".
-    '''
+    """
     return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')

 # date like in svn's $Id
 @templatefilter(b'svnutcdate', intype=templateutil.date)
 def svnutcdate(date):
-    '''Date. Returns a UTC-date in this format: "2009-08-18
+    """Date. Returns a UTC-date in this format: "2009-08-18
     11:00:13Z".
-    '''
+    """
     dateformat = b'%Y-%m-%d %H:%M:%SZ'
     return dateutil.datestr((date[0], 0), dateformat)
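The three date filters differ only in their strftime-style layout. A standalone sketch reproducing the documented outputs with the stdlib (Mercurial's dateutil.datestr additionally expands the %1%2 timezone tokens that svnisodate uses):

import time

ts = time.gmtime(1250593213)  # 2009-08-18 11:00:13 UTC
print(time.strftime('%Y/%m/%d %H:%M:%S', ts))   # utcdate: 2009/08/18 11:00:13
print(time.strftime('%Y-%m-%d %H:%M:%SZ', ts))  # svnutcdate: 2009-08-18 11:00:13Z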
 # make keyword tools accessible
 kwtools = {b'hgcmd': b''}

             b'LastChangedDate': b'{date|svnisodate}',
         },
     )
     templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
     return templates

 def _shrinktext(text, subfunc):
-    '''Helper for keyword expansion removal in text.
-    Depending on subfunc also returns number of substitutions.'''
+    """Helper for keyword expansion removal in text.
+    Depending on subfunc also returns number of substitutions."""
     return subfunc(br'$\1$', text)

 def _preselect(wstatus, changed):
-    '''Retrieves modified and added files from a working directory state
-    and returns the subset of each contained in given changed files
-    retrieved from a change context.'''
+    """Retrieves modified and added files from a working directory state
+    and returns the subset of each contained in given changed files
+    retrieved from a change context."""
     modified = [f for f in wstatus.modified if f in changed]
     added = [f for f in wstatus.added if f in changed]
     return modified, added

 class kwtemplater(object):
-    '''
+    """
     Sets up keyword templates, corresponding keyword regex, and
     provides keyword substitution functions.
-    '''
+    """

     def __init__(self, ui, repo, inc, exc):
         self.ui = ui
         self._repo = weakref.ref(repo)
         self.match = match.match(repo.root, b'', [], inc, exc)
         self.restrict = kwtools[b'hgcmd'] in restricted.split()
         self.postcommit = False

             and self.match(path)
             and not stringutil.binary(data)
         ):
             ctx = self.linkctx(path, node)
             return self.substitute(data, path, ctx, self.rekw.sub)
         return data

     def iskwfile(self, cand, ctx):
-        '''Returns subset of candidates which are configured for keyword
-        expansion but are not symbolic links.'''
+        """Returns subset of candidates which are configured for keyword
+        expansion but are not symbolic links."""
         return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]

     def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
         '''Overwrites selected files expanding/shrinking keywords.'''
         if self.restrict or lookup or self.postcommit:  # exclude kw_copy
             candidates = self.iskwfile(candidates, ctx)
         if not candidates:
             return

         '''Returns lines with keyword substitutions removed.'''
         if self.match(fname):
             text = b''.join(lines)
             if not stringutil.binary(text):
                 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
         return lines

     def wread(self, fname, data):
-        '''If in restricted mode returns data read from wdir with
-        keyword substitutions removed.'''
+        """If in restricted mode returns data read from wdir with
+        keyword substitutions removed."""
         if self.restrict:
             return self.shrink(fname, data)
         return data

 class kwfilelog(filelog.filelog):
-    '''
+    """
     Subclass of filelog to hook into its read, add, cmp methods.
     Keywords are "stored" unexpanded, and processed on reading.
-    '''
+    """

     def __init__(self, opener, kwt, path):
         super(kwfilelog, self).__init__(opener, path)
         self.kwt = kwt
         self.path = path

     def read(self, node):
         '''Expands keywords when reading filelog.'''

     def cmp(self, node, text):
         '''Removes keyword substitutions for comparison.'''
         text = self.kwt.shrink(self.path, text)
         return super(kwfilelog, self).cmp(node, text)

 def _status(ui, repo, wctx, kwt, *pats, **opts):
-    '''Bails out if [keyword] configuration is not active.
-    Returns status of working directory.'''
+    """Bails out if [keyword] configuration is not active.
+    Returns status of working directory."""
     if kwt:
         opts = pycompat.byteskwargs(opts)
         return repo.status(
             match=scmutil.match(wctx, pats, opts),
             clean=True,
             unknown=opts.get(b'unknown') or opts.get(b'all'),
         )
     if ui.configitems(b'keyword'):

     [
         (b'd', b'default', None, _(b'show default keyword template maps')),
         (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
     ],
     _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
     optionalrepo=True,
 )
 def demo(ui, repo, *args, **opts):
-    '''print [keywordmaps] configuration and an expansion example
+    """print [keywordmaps] configuration and an expansion example
     Show current, custom, or default keyword template maps and their
     expansions.
     Extend the current configuration by specifying maps as arguments
     and using -f/--rcfile to source an external hgrc file.
     Use -d/--default to disable current configuration.
     See :hg:`help templates` for information on templates and filters.
-    '''
+    """

     def demoitems(section, items):
         ui.write(b'[%s]\n' % section)
         for k, v in sorted(items):
             if isinstance(v, bool):
                 v = stringutil.pprint(v)
             ui.write(b'%s = %s\n' % (k, v))

 @command(
     b'kwexpand',
     cmdutil.walkopts,
     _(b'hg kwexpand [OPTION]... [FILE]...'),
     inferrepo=True,
 )
 def expand(ui, repo, *pats, **opts):
-    '''expand keywords in the working directory
+    """expand keywords in the working directory
     Run after (re)enabling keyword expansion.
     kwexpand refuses to run if given files contain local changes.
-    '''
+    """
     # 3rd argument sets expansion to True
     _kwfwrite(ui, repo, True, *pats, **opts)

 @command(
     b'kwfiles',
     [
         (b'A', b'all', None, _(b'show keyword status flags of all files')),
         (b'i', b'ignore', None, _(b'show files excluded from expansion')),
         (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
     ]
     + cmdutil.walkopts,
     _(b'hg kwfiles [OPTION]... [FILE]...'),
     inferrepo=True,
 )
 def files(ui, repo, *pats, **opts):
-    '''show files configured for keyword expansion
+    """show files configured for keyword expansion
     List which files in the working directory are matched by the
     [keyword] configuration patterns.
     Useful to prevent inadvertent keyword expansion and to speed up
     execution by including only files that are actual candidates for
     expansion.
     See :hg:`help keyword` on how to construct patterns both for
     inclusion and exclusion of files.
     With -A/--all and -v/--verbose the codes used to show the status
     of files are::
       K = keyword expansion candidate
       k = keyword expansion candidate (not tracked)
       I = ignored
       i = ignored (not tracked)
-    '''
+    """
     kwt = getattr(repo, '_keywordkwt', None)
     wctx = repo[None]
     status = _status(ui, repo, wctx, kwt, *pats, **opts)
     if pats:
         cwd = repo.getcwd()
     else:
         cwd = b''
     files = []

 @command(
     b'kwshrink',
     cmdutil.walkopts,
     _(b'hg kwshrink [OPTION]... [FILE]...'),
     inferrepo=True,
 )
 def shrink(ui, repo, *pats, **opts):
-    '''revert expanded keywords in the working directory
+    """revert expanded keywords in the working directory
     Must be run before changing/disabling active keywords.
     kwshrink refuses to run if given files contain local changes.
-    '''
+    """
     # 3rd argument sets expansion to False
     _kwfwrite(ui, repo, False, *pats, **opts)

 # monkeypatches

 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
-    '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
-    rejects or conflicts due to expanded keywords in working dir.'''
+    """Monkeypatch/wrap patch.patchfile.__init__ to avoid
+    rejects or conflicts due to expanded keywords in working dir."""
     orig(self, ui, gp, backend, store, eolmode)
     kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
     if kwt:
         # shrink keywords read from working dir
         self.lines = kwt.shrinklines(self.fname, self.lines)

 def kwdiff(orig, repo, *args, **kwargs):

     ctx = repo[newid]
     kwt.restrict = True
     kwt.overwrite(ctx, ctx.files(), False, True)
     kwt.restrict = False
     return newid

 def kw_copy(orig, ui, repo, pats, opts, rename=False):
-    '''Wraps cmdutil.copy so that copy/rename destinations do not
+    """Wraps cmdutil.copy so that copy/rename destinations do not
     contain expanded keywords.
     Note that the source of a regular file destination may also be a
     symlink:
     hg cp sym x -> x is symlink
     cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
     For the latter we have to follow the symlink to find out whether its
     target is configured for expansion and we therefore must unexpand the
-    keywords in the destination.'''
+    keywords in the destination."""
     kwt = getattr(repo, '_keywordkwt', None)
     if kwt is None:
         return orig(ui, repo, pats, opts, rename)
     with repo.wlock():
         orig(ui, repo, pats, opts, rename)
         if opts.get(b'dry_run'):
             return
         wctx = repo[None]
         cwd = repo.getcwd()

         def haskwsource(dest):
-            '''Returns true if dest is a regular file and configured for
+            """Returns true if dest is a regular file and configured for
             expansion or a symlink which points to a file configured for
-            expansion. '''
+            expansion."""
             source = repo.dirstate.copied(dest)
             if b'l' in wctx.flags(source):
                 source = pathutil.canonpath(
                     repo.root, cwd, os.path.realpath(source)
                 )
             return kwt.match(source)

         candidates = [

             )
             or self.size() == fctx.size()
         ):
             return self._filelog.cmp(self._filenode, fctx.data())
         return True

 def uisetup(ui):
-    ''' Monkeypatches dispatch._parse to retrieve user command.
+    """Monkeypatches dispatch._parse to retrieve user command.
     Overrides file method to return kwfilelog instead of filelog
     if file matches user configuration.
     Wraps commit to overwrite configured files with updated
     keyword substitutions.
-    Monkeypatches patch and webcommands.'''
+    Monkeypatches patch and webcommands."""

     def kwdispatch_parse(orig, ui, args):
         '''Monkeypatch dispatch._parse to obtain running hg command.'''
         cmd, func, args, options, cmdoptions = orig(ui, args)
         kwtools[b'hgcmd'] = cmd
         return cmd, func, args, options, cmdoptions

     extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
 testedwith = b'ships-with-hg-core'

 eh = exthelper.exthelper()
 eh.merge(lfcommands.eh)
 eh.merge(overrides.eh)
 eh.merge(proto.eh)

 eh.configitem(
-    b'largefiles', b'minsize', default=eh.configitem.dynamicdefault,
+    b'largefiles',
+    b'minsize',
+    default=eh.configitem.dynamicdefault,
 )
 eh.configitem(
-    b'largefiles', b'patterns', default=list,
+    b'largefiles',
+    b'patterns',
+    default=list,
 )
 eh.configitem(
-    b'largefiles', b'usercache', default=None,
+    b'largefiles',
+    b'usercache',
+    default=None,
 )

 cmdtable = eh.cmdtable
 configtable = eh.configtable
 extsetup = eh.finalextsetup
 reposetup = reposetup.reposetup
 uisetup = eh.finaluisetup

 from mercurial.i18n import _
 from mercurial import node, util

 from . import lfutil

 class StoreError(Exception):
-    '''Raised when there is a problem getting files from or putting
-    files to a central store.'''
+    """Raised when there is a problem getting files from or putting
+    files to a central store."""

     def __init__(self, filename, hash, url, detail):
         self.filename = filename
         self.hash = hash
         self.url = url
         self.detail = detail

     def longmessage(self):

         self.repo = repo
         self.url = url

     def put(self, source, hash):
         '''Put source file into the store so it can be retrieved by hash.'''
         raise NotImplementedError(b'abstract method')

     def exists(self, hashes):
-        '''Check to see if the store contains the given hashes. Given an
-        iterable of hashes it returns a mapping from hash to bool.'''
+        """Check to see if the store contains the given hashes. Given an
+        iterable of hashes it returns a mapping from hash to bool."""
         raise NotImplementedError(b'abstract method')

     def get(self, files):
-        '''Get the specified largefiles from the store and write to local
+        """Get the specified largefiles from the store and write to local
         files under repo.root. files is a list of (filename, hash)
         tuples. Return (success, missing), lists of files successfully
         downloaded and those not found in the store. success is a list
         of (filename, hash) tuples; missing is a list of filenames that
         we could not get. (The detailed error message will already have
         been presented to the user, so missing is just supplied as a
-        summary.)'''
+        summary.)"""
         success = []
         missing = []
         ui = self.ui
         at = 0
         available = self.exists({hash for (_filename, hash) in files})
         with ui.makeprogress(
             _(b'getting largefiles'), unit=_(b'files'), total=len(files)

             util.unlink(tmpname)
             return False

         util.rename(tmpname, storefilename)
         lfutil.linktousercache(self.repo, hash)
         return True

     def verify(self, revs, contents=False):
-        '''Verify the existence (and, optionally, contents) of every big
+        """Verify the existence (and, optionally, contents) of every big
         file revision referenced by every changeset in revs.
-        Return 0 if all is well, non-zero on any errors.'''
+        Return 0 if all is well, non-zero on any errors."""
         self.ui.status(
             _(b'searching %d changesets for largefiles\n') % len(revs)
         )
         verified = set()  # set of (filename, filenode) tuples
         filestocheck = []  # list of (cset, filename, expectedhash)
         for rev in revs:
             cctx = self.repo[rev]

         else:
             self.ui.status(
                 _(b'verified existence of %d revisions of %d largefiles\n')
                 % (numrevs, numlfiles)
             )
         return int(failed)

     def _getfile(self, tmpfile, filename, hash):
-        '''Fetch one revision of one file from the store and write it
+        """Fetch one revision of one file from the store and write it
         to tmpfile. Compute the hash of the file on-the-fly as it
         downloads and return the hash. Close tmpfile. Raise
         StoreError if unable to download the file (e.g. it does not
-        exist in the store).'''
+        exist in the store)."""
         raise NotImplementedError(b'abstract method')

     def _verifyfiles(self, contents, filestocheck):
-        '''Perform the actual verification of files in the store.
+        """Perform the actual verification of files in the store.
         'contents' controls verification of content hash.
         'filestocheck' is list of files to check.
         Returns _true_ if any problems are found!
-        '''
+        """
         raise NotImplementedError(b'abstract method')
             _(b'convert from a largefiles repo to a normal repo'),
         ),
     ],
     _(b'hg lfconvert SOURCE DEST [FILE ...]'),
     norepo=True,
     inferrepo=True,
 )
 def lfconvert(ui, src, dest, *pats, **opts):
-    '''convert a normal repository to a largefiles repository
+    """convert a normal repository to a largefiles repository
     Convert repository SOURCE to a new repository DEST, identical to
     SOURCE except that certain files will be converted as largefiles:
     specifically, any file that matches any PATTERN *or* whose size is
     above the minimum size threshold is converted as a largefile. The
     size used to determine whether or not to track a file as a
     largefile is the size of the first version of the file. The
     minimum size can be specified either with --size or in
     configuration as ``largefiles.size``.
     After running this command you will need to make sure that
     largefiles is enabled anywhere you intend to push the new
     repository.
     Use --to-normal to convert largefiles back to normal files; after
-    this, the DEST repository can be used without largefiles at all.'''
+    this, the DEST repository can be used without largefiles at all."""
     opts = pycompat.byteskwargs(opts)
     if opts[b'to_normal']:
         tolfile = False
     else:
         tolfile = True
         size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)

             newdata.append(b'%s %s\n' % (node.hex(revmap[newid]), name))
         except KeyError:
             ui.warn(_(b'no mapping for id %s\n') % id)
             continue
     return b''.join(newdata)

 def _islfile(file, ctx, matcher, size):
-    '''Return true if file should be considered a largefile, i.e.
-    matcher matches it or it is larger than size.'''
+    """Return true if file should be considered a largefile, i.e.
+    matcher matches it or it is larger than size."""
     # never store special .hg* files as largefiles
     if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
         return False
     if matcher and matcher(file):
         return True
     try:
         return ctx.filectx(file).size() >= size * 1024 * 1024
     except error.LookupError:

             % hash
         )
         # XXX check for errors here
         store.put(source, hash)
         at += 1

 def verifylfiles(ui, repo, all=False, contents=False):
-    '''Verify that every largefile revision in the current changeset
+    """Verify that every largefile revision in the current changeset
     exists in the central store. With --contents, also verify that
     the contents of each local largefile file revision are correct (SHA-1 hash
     matches the revision ID). With --all, check every changeset in
-    this repository.'''
+    this repository."""
     if all:
         revs = repo.revs(b'all()')
     else:
         revs = [b'.']
     store = storefactory.openstore(repo)
     return store.verify(revs, contents=contents)

 def cachelfiles(ui, repo, node, filelist=None):
-    '''cachelfiles ensures that all largefiles needed by the specified revision
+    """cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
     returns a tuple (cached, missing). cached is the list of files downloaded
     by this operation; missing is the list of files that were needed but could
-    not be found.'''
+    not be found."""
     lfiles = lfutil.listlfiles(repo, node)
     if filelist:
         lfiles = set(lfiles) & set(filelist)
     toget = []
     ctx = repo[node]
     for lfile in lfiles:
         try:

     if totalmissing > 0:
         ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
     return totalsuccess, totalmissing

 def updatelfiles(
     ui, repo, filelist=None, printmessage=None, normallookup=False
 ):
-    '''Update largefiles according to standins in the working directory
+    """Update largefiles according to standins in the working directory
     If ``printmessage`` is other than ``None``, it means "print (or
     ignore, for false) message forcibly".
-    '''
+    """
     statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
         if filelist is not None:
             filelist = set(filelist)
             lfiles = [f for f in lfiles if f in filelist]
     # if hardlinks fail, fallback on atomic copy
     with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
         for chunk in util.filechunkiter(srcf):
             dstf.write(chunk)
     os.chmod(dest, os.stat(src).st_mode)

 def usercachepath(ui, hash):
-    '''Return the correct location in the "global" largefiles cache for a file
+    """Return the correct location in the "global" largefiles cache for a file
     with the given hash.
     This cache is used for sharing of largefiles across repositories - both
-    to preserve download bandwidth and storage space.'''
+    to preserve download bandwidth and storage space."""
     return os.path.join(_usercachedir(ui), hash)

 def _usercachedir(ui, name=longname):
     '''Return the location of the "global" largefiles cache.'''
     path = ui.configpath(name, b'usercache')
     if path:
         return path

 def inusercache(ui, hash):
     path = usercachepath(ui, hash)
     return os.path.exists(path)

 def findfile(repo, hash):
-    '''Return store path of the largefile with the specified hash.
+    """Return store path of the largefile with the specified hash.
     As a side effect, the file might be linked from user cache.
-    Return None if the file can't be found locally.'''
+    Return None if the file can't be found locally."""
     path, exists = findstorepath(repo, hash)
     if exists:
         repo.ui.note(_(b'found %s in store\n') % hash)
         return path
     elif inusercache(repo.ui, hash):
         repo.ui.note(_(b'found %s in system cache\n') % hash)
         path = storepath(repo, hash)
         link(usercachepath(repo.ui, hash), path)

     def write(self, tr=False):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
         super(largefilesdirstate, self).write(None)

 def openlfdirstate(ui, repo, create=True):
-    '''
+    """
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
-    '''
+    """
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
     lfdirstate = largefilesdirstate(
         opener,
         ui,
         repo.root,
         repo.dirstate._validate,

             modified.append(lfile)
         else:
             clean.append(lfile)
             lfdirstate.normal(lfile)
     return s

 def listlfiles(repo, rev=None, matcher=None):
-    '''return a list of largefiles in the working copy or the
-    specified changeset'''
+    """return a list of largefiles in the working copy or the
+    specified changeset"""
     if matcher is None:
         matcher = getstandinmatcher(repo)
     # ignore unknown files in working directory
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
         if rev is not None or repo.dirstate[f] != b'?'
     ]

 def instore(repo, hash, forcelocal=False):
     '''Return true if a largefile with the given hash exists in the store'''
     return os.path.exists(storepath(repo, hash, forcelocal))

 def storepath(repo, hash, forcelocal=False):
-    '''Return the correct location in the repository largefiles store for a
-    file with the given hash.'''
+    """Return the correct location in the repository largefiles store for a
+    file with the given hash."""
     if not forcelocal and repo.shared():
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
     return repo.vfs.join(longname, hash)

 def findstorepath(repo, hash):
-    '''Search through the local store path(s) to find the file for the given
+    """Search through the local store path(s) to find the file for the given
     hash. If the file is not found, its path in the primary store is returned.
     The return value is a tuple of (path, exists(path)).
-    '''
+    """
     # For shared repos, the primary store is in the share source. But for
     # backward compatibility, force a lookup in the local store if it wasn't
     # found in the share source.
     path = storepath(repo, hash, False)
     if instore(repo, hash):
         return (path, True)
     elif repo.shared() and instore(repo, hash, True):
         return storepath(repo, hash, True), True
     return (path, False)

 def copyfromcache(repo, hash, filename):
-    '''Copy the specified largefile from the repo or system cache to
+    """Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
     file was not found in either cache (which should not happened:
     this is meant to be called only after ensuring that the needed
-    largefile exists in the cache).'''
+    largefile exists in the cache)."""
     wvfs = repo.wvfs
     path = findfile(repo, hash)
     if path is None:
         return False
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
     with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:

         storepath(repo, hash), createmode=repo.store.createmode
     ) as dstf:
         for chunk in util.filechunkiter(srcf):
             dstf.write(chunk)
     linktousercache(repo, hash)

 def linktousercache(repo, hash):
-    '''Link / copy the largefile with the specified hash from the store
-    to the cache.'''
+    """Link / copy the largefile with the specified hash from the store
+    to the cache."""
     path = usercachepath(repo.ui, hash)
     link(storepath(repo, hash), path)

 def getstandinmatcher(repo, rmatcher=None):
     '''Return a match object that applies rmatcher to the standin directory'''
     wvfs = repo.wvfs
     standindir = shortname

     # no warnings about missing files or directories
     badfn = lambda f, msg: None

     if rmatcher and not rmatcher.always():
         pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
         if not pats:
             pats = [wvfs.join(standindir)]
         match = scmutil.match(repo[None], pats, badfn=badfn)
     else:
         # no patterns: relative to repo root
         match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
     return match

 def composestandinmatcher(repo, rmatcher):
-    '''Return a matcher that accepts standins corresponding to the
+    """Return a matcher that accepts standins corresponding to the
     files accepted by rmatcher. Pass the list of files in the matcher
-    as the paths specified by the user.'''
+    as the paths specified by the user."""
     smatcher = getstandinmatcher(repo, rmatcher)
     isstandin = smatcher.matchfn

     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))

     smatcher.matchfn = composedmatchfn
     return smatcher

 def standin(filename):
-    '''Return the repo-relative path to the standin for the specified big
-    file.'''
+    """Return the repo-relative path to the standin for the specified big
+    file."""
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
     #    needs it repo-relative so it can be passed to repo[None].add(). So
     #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
     return shortnameslash + util.pconvert(filename)
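A minimal sketch of the standin path mapping described in the notes above, assuming the extension's usual b'.hglf' standin directory (defined elsewhere in lfutil) and simplifying util.pconvert to a backslash replacement:

def _standin_demo(filename):
    # mimic util.pconvert: dirstate always uses '/' separators
    return b'.hglf/' + filename.replace(b'\\', b'/')

assert _standin_demo(b'big.iso') == b'.hglf/big.iso'
assert _standin_demo(b'data\\big.bin') == b'.hglf/data/big.bin'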
 def isstandin(filename):
-    '''Return true if filename is a big file standin. filename must be
-    in Mercurial's internal form (slash-separated).'''
+    """Return true if filename is a big file standin. filename must be
+    in Mercurial's internal form (slash-separated)."""
     return filename.startswith(shortnameslash)

 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
     # Change local separator to / first just in case we are passed filenames
     # from an external source (like the command line).
     bits = util.pconvert(filename).split(b'/', 1)

         hash = hashfile(file)
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
         raise error.Abort(_(b'%s: file not found!') % lfile)

 def readasstandin(fctx):
-    '''read hex hash from given filectx of standin file
-    This encapsulates how "standin" data is stored into storage layer.'''
+    """read hex hash from given filectx of standin file
+    This encapsulates how "standin" data is stored into storage layer."""
     return fctx.data().strip()

 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
     repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')

 def copyandhash(instream, outfile):
-    '''Read bytes from instream (iterable) and write them to outfile,
-    computing the SHA-1 hash of the data along the way. Return the hash.'''
+    """Read bytes from instream (iterable) and write them to outfile,
+    computing the SHA-1 hash of the data along the way. Return the hash."""
     hasher = hashutil.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
     return hex(hasher.digest())
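copyandhash above is a plain single-pass copy-and-digest loop; a standalone equivalent with stdlib hashlib in place of Mercurial's hashutil and hex helpers:

import hashlib
import io

def copyandhash(instream, outfile):
    # hash each chunk as it is written out; one pass over the data
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

out = io.BytesIO()
digest = copyandhash(iter([b'chunk1', b'chunk2']), out)
assert out.getvalue() == b'chunk1chunk2'
assert len(digest) == 40  # hex-encoded SHA-1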
def hashfile(file): | def hashfile(file): | ||||
if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): | ||||
files.add(f) | files.add(f) | ||||
for fn in files: | for fn in files: | ||||
if isstandin(fn) and fn in ctx: | if isstandin(fn) and fn in ctx: | ||||
addfunc(fn, readasstandin(ctx[fn])) | addfunc(fn, readasstandin(ctx[fn])) | ||||
def updatestandinsbymatch(repo, match): | def updatestandinsbymatch(repo, match): | ||||
'''Update standins in the working directory according to specified match | """Update standins in the working directory according to specified match | ||||
This returns (possibly modified) ``match`` object to be used for | This returns (possibly modified) ``match`` object to be used for | ||||
subsequent commit process. | subsequent commit process. | ||||
''' | """ | ||||
ui = repo.ui | ui = repo.ui | ||||
# Case 1: user calls commit with no specific files or | # Case 1: user calls commit with no specific files or | ||||
# include/exclude patterns: refresh and commit all files that | # include/exclude patterns: refresh and commit all files that | ||||
# are "dirty". | # are "dirty". | ||||
if match is None or match.always(): | if match is None or match.always(): | ||||
# Spend a bit of time here to get a list of files we know | # Spend a bit of time here to get a list of files we know | ||||
return f in standins | return f in standins | ||||
match.matchfn = matchfn | match.matchfn = matchfn | ||||
return match | return match | ||||
class automatedcommithook(object): | class automatedcommithook(object): | ||||
'''Stateful hook to update standins at the 1st commit of resuming | """Stateful hook to update standins at the 1st commit of resuming | ||||
For efficiency, updating standins in the working directory should | For efficiency, updating standins in the working directory should | ||||
be avoided while automated committing (like rebase, transplant and | be avoided while automated committing (like rebase, transplant and | ||||
so on), because they should be updated before committing. | so on), because they should be updated before committing. | ||||
But the 1st commit of resuming automated committing (e.g. ``rebase | But the 1st commit of resuming automated committing (e.g. ``rebase | ||||
--continue``) should update them, because largefiles may be | --continue``) should update them, because largefiles may be | ||||
modified manually. | modified manually. | ||||
''' | """ | ||||
def __init__(self, resuming): | def __init__(self, resuming): | ||||
self.resuming = resuming | self.resuming = resuming | ||||
def __call__(self, repo, match): | def __call__(self, repo, match): | ||||
if self.resuming: | if self.resuming: | ||||
self.resuming = False # avoids updating at subsequent commits | self.resuming = False # avoids updating at subsequent commits | ||||
return updatestandinsbymatch(repo, match) | return updatestandinsbymatch(repo, match) | ||||
else: | else: | ||||
return match | return match | ||||
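The resuming flag makes this a one-shot hook: only the first call after resuming refreshes standins, every later call passes the match through untouched. A self-contained analog of that pattern (names hypothetical):

    class oneshothook(object):
        '''Apply a transform on the first call only, then pass through.'''

        def __init__(self, resuming, transform):
            self.resuming = resuming
            self.transform = transform

        def __call__(self, value):
            if self.resuming:
                self.resuming = False  # subsequent calls skip the transform
                return self.transform(value)
            return value

    hook = oneshothook(True, lambda m: ('refreshed', m))
    assert hook('m1') == ('refreshed', 'm1')
    assert hook('m2') == 'm2'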
def getstatuswriter(ui, repo, forcibly=None): | def getstatuswriter(ui, repo, forcibly=None): | ||||
'''Return the function to write largefiles specific status out | """Return the function to write largefiles specific status out | ||||
If ``forcibly`` is ``None``, this returns the last element of | If ``forcibly`` is ``None``, this returns the last element of | ||||
``repo._lfstatuswriters`` as "default" writer function. | ``repo._lfstatuswriters`` as "default" writer function. | ||||
Otherwise, this returns the function to always write out (or | Otherwise, this returns the function to always write out (or | ||||
ignore if ``not forcibly``) status. | ignore if ``not forcibly``) status. | ||||
''' | """ | ||||
if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): | ||||
return repo._lfstatuswriters[-1] | return repo._lfstatuswriters[-1] | ||||
else: | else: | ||||
if forcibly: | if forcibly: | ||||
return ui.status # forcibly WRITE OUT | return ui.status # forcibly WRITE OUT | ||||
else: | else: | ||||
return lambda *msg, **opts: None # forcibly IGNORE | return lambda *msg, **opts: None # forcibly IGNORE |
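The bulk of the hunks above and below are black rewriting '''-quoted docstrings to """, PEP 257's preferred form. This is reproducible with black's Python API (format_str and Mode are the documented entry points; string_normalization=False mirrors Mercurial's skip-string-normalization setting, and with the black release used for this reformat docstring quotes are normalized regardless):

    import black

    src = "def f():\n    '''one-line docstring'''\n"
    print(black.format_str(src, mode=black.Mode(string_normalization=False)))
    # def f():
    #     """one-line docstring"""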
from . import ( | from . import ( | ||||
basestore, | basestore, | ||||
lfutil, | lfutil, | ||||
) | ) | ||||
class localstore(basestore.basestore): | class localstore(basestore.basestore): | ||||
'''localstore first attempts to grab files out of the store in the remote | """localstore first attempts to grab files out of the store in the remote | ||||
Mercurial repository. Failing that, it attempts to grab the files from | Mercurial repository. Failing that, it attempts to grab the files from | ||||
the user cache.''' | the user cache.""" | ||||
def __init__(self, ui, repo, remote): | def __init__(self, ui, repo, remote): | ||||
self.remote = remote.local() | self.remote = remote.local() | ||||
super(localstore, self).__init__(ui, repo, self.remote.url()) | super(localstore, self).__init__(ui, repo, self.remote.url()) | ||||
def put(self, source, hash): | def put(self, source, hash): | ||||
if lfutil.instore(self.remote, hash): | if lfutil.instore(self.remote, hash): | ||||
return | return |
lfstatus = lfutil.lfstatus | lfstatus = lfutil.lfstatus | ||||
MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr' | MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr' | ||||
# -- Utility functions: commonly/repeatedly needed functionality --------------- | # -- Utility functions: commonly/repeatedly needed functionality --------------- | ||||
def composelargefilematcher(match, manifest): | def composelargefilematcher(match, manifest): | ||||
'''create a matcher that matches only the largefiles in the original | """create a matcher that matches only the largefiles in the original | ||||
matcher''' | matcher""" | ||||
m = copy.copy(match) | m = copy.copy(match) | ||||
lfile = lambda f: lfutil.standin(f) in manifest | lfile = lambda f: lfutil.standin(f) in manifest | ||||
m._files = [lf for lf in m._files if lfile(lf)] | m._files = [lf for lf in m._files if lfile(lf)] | ||||
m._fileset = set(m._files) | m._fileset = set(m._files) | ||||
m.always = lambda: False | m.always = lambda: False | ||||
origmatchfn = m.matchfn | origmatchfn = m.matchfn | ||||
m.matchfn = lambda f: lfile(f) and origmatchfn(f) | m.matchfn = lambda f: lfile(f) and origmatchfn(f) | ||||
return m | return m | ||||
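The pattern here is worth calling out: copy a matcher, then narrow both its file list and its match function with an extra predicate. A self-contained sketch of the same idea (the matcher class below is a hypothetical stand-in, not Mercurial's):

    import copy

    class matcher(object):
        def __init__(self, files):
            self._files = list(files)
            self.matchfn = lambda f: f in self._files

    def narrow(match, predicate):
        m = copy.copy(match)
        m._files = [f for f in m._files if predicate(f)]
        origmatchfn = match.matchfn
        m.matchfn = lambda f: predicate(f) and origmatchfn(f)
        return m

    largefiles = {b'big.bin'}
    m = narrow(matcher([b'big.bin', b'small.txt']), lambda f: f in largefiles)
    assert m._files == [b'big.bin']
    assert m.matchfn(b'big.bin') and not m.matchfn(b'small.txt')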
) | ) | ||||
if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile | if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile | ||||
mresult.addfile(lfile, b'r', None, b'replaced by standin') | mresult.addfile(lfile, b'r', None, b'replaced by standin') | ||||
mresult.addfile(standin, b'g', sargs, b'replaces standin') | mresult.addfile(standin, b'g', sargs, b'replaces standin') | ||||
else: # keep local normal file | else: # keep local normal file | ||||
mresult.addfile(lfile, b'k', None, b'replaces standin') | mresult.addfile(lfile, b'k', None, b'replaces standin') | ||||
if branchmerge: | if branchmerge: | ||||
mresult.addfile( | mresult.addfile( | ||||
standin, b'k', None, b'replaced by non-standin', | standin, | ||||
b'k', | |||||
None, | |||||
b'replaced by non-standin', | |||||
) | ) | ||||
else: | else: | ||||
mresult.addfile( | mresult.addfile( | ||||
standin, b'r', None, b'replaced by non-standin', | standin, | ||||
b'r', | |||||
None, | |||||
b'replaced by non-standin', | |||||
) | ) | ||||
elif lm in (b'g', b'dc') and sm != b'r': | elif lm in (b'g', b'dc') and sm != b'r': | ||||
if lm == b'dc': | if lm == b'dc': | ||||
f1, f2, fa, move, anc = largs | f1, f2, fa, move, anc = largs | ||||
largs = (p2[f2].flags(), False) | largs = (p2[f2].flags(), False) | ||||
# Case 2: largefile in the working copy, normal file in | # Case 2: largefile in the working copy, normal file in | ||||
# the second parent | # the second parent | ||||
usermsg = ( | usermsg = ( | ||||
_( | _( | ||||
b'remote turned local largefile %s into a normal file\n' | b'remote turned local largefile %s into a normal file\n' | ||||
b'keep (l)argefile or use (n)ormal file?' | b'keep (l)argefile or use (n)ormal file?' | ||||
b'$$ &Largefile $$ &Normal file' | b'$$ &Largefile $$ &Normal file' | ||||
) | ) | ||||
% lfile | % lfile | ||||
) | ) | ||||
if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile | if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile | ||||
if branchmerge: | if branchmerge: | ||||
# largefile can be restored from standin safely | # largefile can be restored from standin safely | ||||
mresult.addfile( | mresult.addfile( | ||||
lfile, b'k', None, b'replaced by standin', | lfile, | ||||
b'k', | |||||
None, | |||||
b'replaced by standin', | |||||
) | ) | ||||
mresult.addfile(standin, b'k', None, b'replaces standin') | mresult.addfile(standin, b'k', None, b'replaces standin') | ||||
else: | else: | ||||
# "lfile" should be marked as "removed" without | # "lfile" should be marked as "removed" without | ||||
# removal of itself | # removal of itself | ||||
mresult.addfile( | mresult.addfile( | ||||
lfile, | lfile, | ||||
MERGE_ACTION_LARGEFILE_MARK_REMOVED, | MERGE_ACTION_LARGEFILE_MARK_REMOVED, | ||||
None, | None, | ||||
b'forget non-standin largefile', | b'forget non-standin largefile', | ||||
) | ) | ||||
# linear-merge should treat this largefile as 're-added' | # linear-merge should treat this largefile as 're-added' | ||||
mresult.addfile(standin, b'a', None, b'keep standin') | mresult.addfile(standin, b'a', None, b'keep standin') | ||||
else: # pick remote normal file | else: # pick remote normal file | ||||
mresult.addfile(lfile, b'g', largs, b'replaces standin') | mresult.addfile(lfile, b'g', largs, b'replaces standin') | ||||
mresult.addfile( | mresult.addfile( | ||||
standin, b'r', None, b'replaced by non-standin', | standin, | ||||
b'r', | |||||
None, | |||||
b'replaced by non-standin', | |||||
) | ) | ||||
return mresult | return mresult | ||||
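For readers following the single-letter action codes above, a rough legend; these mappings are inferred from mergestatemod of this era plus the extension's own constant, so treat them as orientation rather than an exhaustive API listing:

    MERGE_ACTION_LEGEND = {
        b'g': 'get: take the file from the other side',
        b'k': 'keep: leave the local file in place',
        b'r': 'remove: delete the file',
        b'a': 'add: mark the file as (re)added',
        b'dc': 'delete/changed conflict',
        b'lfmr': 'largefiles: mark removed without touching the working copy',
    }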
@eh.wrapfunction(mergestatemod, b'recordupdates') | @eh.wrapfunction(mergestatemod, b'recordupdates') | ||||
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata): | def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata): | ||||
if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions: | if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions: |
eh = exthelper.exthelper() | eh = exthelper.exthelper() | ||||
# these will all be replaced by largefiles.uisetup | # these will all be replaced by largefiles.uisetup | ||||
ssholdcallstream = None | ssholdcallstream = None | ||||
httpoldcallstream = None | httpoldcallstream = None | ||||
def putlfile(repo, proto, sha): | def putlfile(repo, proto, sha): | ||||
'''Server command for putting a largefile into a repository's local store | """Server command for putting a largefile into a repository's local store | ||||
and into the user cache.''' | and into the user cache.""" | ||||
with proto.mayberedirectstdio() as output: | with proto.mayberedirectstdio() as output: | ||||
path = lfutil.storepath(repo, sha) | path = lfutil.storepath(repo, sha) | ||||
util.makedirs(os.path.dirname(path)) | util.makedirs(os.path.dirname(path)) | ||||
tmpfp = util.atomictempfile(path, createmode=repo.store.createmode) | tmpfp = util.atomictempfile(path, createmode=repo.store.createmode) | ||||
try: | try: | ||||
for p in proto.getpayload(): | for p in proto.getpayload(): | ||||
tmpfp.write(p) | tmpfp.write(p) | ||||
) | ) | ||||
finally: | finally: | ||||
tmpfp.discard() | tmpfp.discard() | ||||
return wireprototypes.pushres(0, output.getvalue() if output else b'') | return wireprototypes.pushres(0, output.getvalue() if output else b'') | ||||
def getlfile(repo, proto, sha): | def getlfile(repo, proto, sha): | ||||
'''Server command for retrieving a largefile from the repository-local | """Server command for retrieving a largefile from the repository-local | ||||
cache or user cache.''' | cache or user cache.""" | ||||
filename = lfutil.findfile(repo, sha) | filename = lfutil.findfile(repo, sha) | ||||
if not filename: | if not filename: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'requested largefile %s not present in cache') % sha | _(b'requested largefile %s not present in cache') % sha | ||||
) | ) | ||||
f = open(filename, b'rb') | f = open(filename, b'rb') | ||||
length = os.fstat(f.fileno())[6] | length = os.fstat(f.fileno())[6] | ||||
# Since we can't set an HTTP content-length header here, and | # Since we can't set an HTTP content-length header here, and | ||||
# Mercurial core provides no way to give the length of a streamres | # Mercurial core provides no way to give the length of a streamres | ||||
# (and reading the entire file into RAM would be ill-advised), we | # (and reading the entire file into RAM would be ill-advised), we | ||||
# just send the length on the first line of the response, like the | # just send the length on the first line of the response, like the | ||||
# ssh proto does for string responses. | # ssh proto does for string responses. | ||||
def generator(): | def generator(): | ||||
yield b'%d\n' % length | yield b'%d\n' % length | ||||
for chunk in util.filechunkiter(f): | for chunk in util.filechunkiter(f): | ||||
yield chunk | yield chunk | ||||
return wireprototypes.streamreslegacy(gen=generator()) | return wireprototypes.streamreslegacy(gen=generator()) | ||||
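The length-on-first-line convention described in the comment is easy to mirror on the consuming side. A sketch of a reader for such a stream (file-like API assumed):

    import io

    def readlengthprefixed(stream):
        # the first line carries the payload length as a decimal integer
        length = int(stream.readline())
        payload = stream.read(length)
        assert len(payload) == length, 'short read'
        return payload

    blob = b'largefile contents'
    wire = io.BytesIO(b'%d\n' % len(blob) + blob)
    assert readlengthprefixed(wire) == blob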
def statlfile(repo, proto, sha): | def statlfile(repo, proto, sha): | ||||
'''Server command for checking if a largefile is present - returns '2\n' if | """Server command for checking if a largefile is present - returns '2\n' if | ||||
the largefile is missing, '0\n' if it seems to be in good condition. | the largefile is missing, '0\n' if it seems to be in good condition. | ||||
The value 1 is reserved for mismatched checksum, but that is too expensive | The value 1 is reserved for mismatched checksum, but that is too expensive | ||||
to be verified on every stat and must be caught by running 'hg verify' | to be verified on every stat and must be caught by running 'hg verify' | ||||
server side.''' | server side.""" | ||||
filename = lfutil.findfile(repo, sha) | filename = lfutil.findfile(repo, sha) | ||||
if not filename: | if not filename: | ||||
return wireprototypes.bytesresponse(b'2\n') | return wireprototypes.bytesresponse(b'2\n') | ||||
return wireprototypes.bytesresponse(b'0\n') | return wireprototypes.bytesresponse(b'0\n') | ||||
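Interpreting statlfile's wire responses client-side, per the docstring above (0 = present, 2 = missing, 1 = reserved for checksum mismatch); constant names are illustrative:

    STATLFILE_OK, STATLFILE_CORRUPT, STATLFILE_MISSING = 0, 1, 2

    def parsestat(response):
        code = int(response.rstrip(b'\n'))
        assert code in (STATLFILE_OK, STATLFILE_CORRUPT, STATLFILE_MISSING)
        return code

    assert parsestat(b'0\n') == STATLFILE_OK
    assert parsestat(b'2\n') == STATLFILE_MISSING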
def wirereposetup(ui, repo): | def wirereposetup(ui, repo): | ||||
class lfileswirerepository(repo.__class__): | class lfileswirerepository(repo.__class__): | ||||
def _capabilities(orig, repo, proto): | def _capabilities(orig, repo, proto): | ||||
'''announce largefile server capability''' | '''announce largefile server capability''' | ||||
caps = orig(repo, proto) | caps = orig(repo, proto) | ||||
caps.append(b'largefiles=serve') | caps.append(b'largefiles=serve') | ||||
return caps | return caps | ||||
def heads(orig, repo, proto): | def heads(orig, repo, proto): | ||||
'''Wrap server command - largefile capable clients will know to call | """Wrap server command - largefile capable clients will know to call | ||||
lheads instead''' | lheads instead""" | ||||
if lfutil.islfilesrepo(repo): | if lfutil.islfilesrepo(repo): | ||||
return wireprototypes.ooberror(LARGEFILES_REQUIRED_MSG) | return wireprototypes.ooberror(LARGEFILES_REQUIRED_MSG) | ||||
return orig(repo, proto) | return orig(repo, proto) | ||||
def sshrepocallstream(self, cmd, **args): | def sshrepocallstream(self, cmd, **args): | ||||
if cmd == b'heads' and self.capable(b'largefiles'): | if cmd == b'heads' and self.capable(b'largefiles'): |
'''Put file with the given hash in the remote store.''' | '''Put file with the given hash in the remote store.''' | ||||
raise NotImplementedError(b'abstract method') | raise NotImplementedError(b'abstract method') | ||||
def _get(self, hash): | def _get(self, hash): | ||||
'''Get a iterator for content with the given hash.''' | '''Get a iterator for content with the given hash.''' | ||||
raise NotImplementedError(b'abstract method') | raise NotImplementedError(b'abstract method') | ||||
def _stat(self, hashes): | def _stat(self, hashes): | ||||
'''Get information about availability of files specified by | """Get information about availability of files specified by | ||||
hashes in the remote store. Return dictionary mapping hashes | hashes in the remote store. Return dictionary mapping hashes | ||||
to return code where 0 means that file is available, other | to return code where 0 means that file is available, other | ||||
values if not.''' | values if not.""" | ||||
raise NotImplementedError(b'abstract method') | raise NotImplementedError(b'abstract method') |
editor=editor, | editor=editor, | ||||
extra=extra, | extra=extra, | ||||
) | ) | ||||
return result | return result | ||||
# TODO: _subdirlfs should be moved into "lfutil.py", because | # TODO: _subdirlfs should be moved into "lfutil.py", because | ||||
# it is referred only from "lfutil.updatestandinsbymatch" | # it is referred only from "lfutil.updatestandinsbymatch" | ||||
def _subdirlfs(self, files, lfiles): | def _subdirlfs(self, files, lfiles): | ||||
''' | """ | ||||
Adjust matched file list | Adjust matched file list | ||||
If we pass a directory to commit whose only committable files | If we pass a directory to commit whose only committable files | ||||
are largefiles, the core commit code aborts before finding | are largefiles, the core commit code aborts before finding | ||||
the largefiles. | the largefiles. | ||||
So we do the following: | So we do the following: | ||||
For directories that only have largefiles as matches, | For directories that only have largefiles as matches, | ||||
we explicitly add the largefiles to the match list and remove | we explicitly add the largefiles to the match list and remove | ||||
the directory. | the directory. | ||||
In other cases, we leave the match list unmodified. | In other cases, we leave the match list unmodified. | ||||
''' | """ | ||||
actualfiles = [] | actualfiles = [] | ||||
dirs = [] | dirs = [] | ||||
regulars = [] | regulars = [] | ||||
for f in files: | for f in files: | ||||
if lfutil.isstandin(f + b'/'): | if lfutil.isstandin(f + b'/'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'file "%s" is a largefile standin') % f, | _(b'file "%s" is a largefile standin') % f, |
def _put(self, hash, fd): | def _put(self, hash, fd): | ||||
return self.remote.putlfile(hash, fd) | return self.remote.putlfile(hash, fd) | ||||
def _get(self, hash): | def _get(self, hash): | ||||
return self.remote.getlfile(hash) | return self.remote.getlfile(hash) | ||||
def _stat(self, hashes): | def _stat(self, hashes): | ||||
'''For each hash, return 0 if it is available, other values if not. | """For each hash, return 0 if it is available, other values if not. | ||||
It is usually 2 if the largefile is missing, but might be 1 if the server | It is usually 2 if the largefile is missing, but might be 1 if the server | ||||
has a corrupted copy.''' | has a corrupted copy.""" | ||||
with self.remote.commandexecutor() as e: | with self.remote.commandexecutor() as e: | ||||
fs = [] | fs = [] | ||||
for hash in hashes: | for hash in hashes: | ||||
fs.append((hash, e.callcommand(b'statlfile', {b'sha': hash,}))) | fs.append( | ||||
( | |||||
hash, | |||||
e.callcommand( | |||||
b'statlfile', | |||||
{ | |||||
b'sha': hash, | |||||
}, | |||||
), | |||||
) | |||||
) | |||||
return {hash: f.result() for hash, f in fs} | return {hash: f.result() for hash, f in fs} |
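The commandexecutor pattern above pipelines one statlfile call per hash and only then resolves the futures. The same shape with the standard library, using a thread pool as a stand-in for the peer API:

    from concurrent.futures import ThreadPoolExecutor

    def stat_on_server(sha):
        # hypothetical stand-in for the statlfile wire call
        return 0 if sha.startswith(b'0') else 2

    hashes = [b'0abc', b'1def']
    with ThreadPoolExecutor() as e:
        fs = [(sha, e.submit(stat_on_server, sha)) for sha in hashes]
        results = {sha: f.result() for sha, f in fs}
    assert results == {b'0abc': 0, b'1def': 2}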
configtable = eh.configtable | configtable = eh.configtable | ||||
extsetup = eh.finalextsetup | extsetup = eh.finalextsetup | ||||
uisetup = eh.finaluisetup | uisetup = eh.finaluisetup | ||||
filesetpredicate = eh.filesetpredicate | filesetpredicate = eh.filesetpredicate | ||||
reposetup = eh.finalreposetup | reposetup = eh.finalreposetup | ||||
templatekeyword = eh.templatekeyword | templatekeyword = eh.templatekeyword | ||||
eh.configitem( | eh.configitem( | ||||
b'experimental', b'lfs.serve', default=True, | b'experimental', | ||||
b'lfs.serve', | |||||
default=True, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'experimental', b'lfs.user-agent', default=None, | b'experimental', | ||||
b'lfs.user-agent', | |||||
default=None, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'experimental', b'lfs.disableusercache', default=False, | b'experimental', | ||||
b'lfs.disableusercache', | |||||
default=False, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'experimental', b'lfs.worker-enable', default=True, | b'experimental', | ||||
b'lfs.worker-enable', | |||||
default=True, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'lfs', b'url', default=None, | b'lfs', | ||||
b'url', | |||||
default=None, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'lfs', b'usercache', default=None, | b'lfs', | ||||
b'usercache', | |||||
default=None, | |||||
) | ) | ||||
# Deprecated | # Deprecated | ||||
eh.configitem( | eh.configitem( | ||||
b'lfs', b'threshold', default=None, | b'lfs', | ||||
b'threshold', | |||||
default=None, | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'lfs', b'track', default=b'none()', | b'lfs', | ||||
b'track', | |||||
default=b'none()', | |||||
) | ) | ||||
eh.configitem( | eh.configitem( | ||||
b'lfs', b'retry', default=5, | b'lfs', | ||||
b'retry', | |||||
default=5, | |||||
) | ) | ||||
lfsprocessor = ( | lfsprocessor = ( | ||||
wrapper.readfromstore, | wrapper.readfromstore, | ||||
wrapper.writetostore, | wrapper.writetostore, | ||||
wrapper.bypasscheckhash, | wrapper.bypasscheckhash, | ||||
) | ) | ||||
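The configitem rewrites above are black's "magic trailing comma" at work: a trailing comma inside the parentheses forces one argument per line even when the call would fit on a single line. Reproducible with black's API (string_normalization=False again mirrors Mercurial's configuration; output shown as produced by the black release used here):

    import black

    src = "eh.configitem(b'lfs', b'url', default=None,)\n"
    print(black.format_str(src, mode=black.Mode(string_normalization=False)))
    # eh.configitem(
    #     b'lfs',
    #     b'url',
    #     default=None,
    # )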
def walk(self, path=None, onerror=None): | def walk(self, path=None, onerror=None): | ||||
return (b'', [], []) | return (b'', [], []) | ||||
def write(self, oid, data): | def write(self, oid, data): | ||||
pass | pass | ||||
class lfsuploadfile(httpconnectionmod.httpsendfile): | class lfsuploadfile(httpconnectionmod.httpsendfile): | ||||
"""a file-like object that supports keepalive. | """a file-like object that supports keepalive.""" | ||||
""" | |||||
def __init__(self, ui, filename): | def __init__(self, ui, filename): | ||||
super(lfsuploadfile, self).__init__(ui, filename, b'rb') | super(lfsuploadfile, self).__init__(ui, filename, b'rb') | ||||
self.read = self._data.read | self.read = self._data.read | ||||
def _makeprogress(self): | def _makeprogress(self): | ||||
return None # progress is handled by the worker client | return None # progress is handled by the worker client | ||||
def has(self, oid): | def has(self, oid): | ||||
"""Returns True if the local blobstore contains the requested blob, | """Returns True if the local blobstore contains the requested blob, | ||||
False otherwise.""" | False otherwise.""" | ||||
return self.cachevfs.exists(oid) or self.vfs.exists(oid) | return self.cachevfs.exists(oid) or self.vfs.exists(oid) | ||||
def _urlerrorreason(urlerror): | def _urlerrorreason(urlerror): | ||||
'''Create a friendly message for the given URLError to be used in an | """Create a friendly message for the given URLError to be used in an | ||||
LfsRemoteError message. | LfsRemoteError message. | ||||
''' | """ | ||||
inst = urlerror | inst = urlerror | ||||
if isinstance(urlerror.reason, Exception): | if isinstance(urlerror.reason, Exception): | ||||
inst = urlerror.reason | inst = urlerror.reason | ||||
if util.safehasattr(inst, b'reason'): | if util.safehasattr(inst, b'reason'): | ||||
try: # usually it is in the form (errno, strerror) | try: # usually it is in the form (errno, strerror) | ||||
reason = inst.reason.args[1] | reason = inst.reason.args[1] | ||||
See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md | See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md | ||||
""" | """ | ||||
objects = [ | objects = [ | ||||
{'oid': pycompat.strurl(p.oid()), 'size': p.size()} | {'oid': pycompat.strurl(p.oid()), 'size': p.size()} | ||||
for p in pointers | for p in pointers | ||||
] | ] | ||||
requestdata = pycompat.bytesurl( | requestdata = pycompat.bytesurl( | ||||
json.dumps( | json.dumps( | ||||
{'objects': objects, 'operation': pycompat.strurl(action),} | { | ||||
'objects': objects, | |||||
'operation': pycompat.strurl(action), | |||||
} | |||||
) | ) | ||||
) | ) | ||||
url = b'%s/objects/batch' % self.baseurl | url = b'%s/objects/batch' % self.baseurl | ||||
batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata) | batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata) | ||||
batchreq.add_header('Accept', 'application/vnd.git-lfs+json') | batchreq.add_header('Accept', 'application/vnd.git-lfs+json') | ||||
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') | batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') | ||||
try: | try: | ||||
with contextlib.closing(self.urlopener.open(batchreq)) as rsp: | with contextlib.closing(self.urlopener.open(batchreq)) as rsp: |
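The request assembled above follows the git-lfs batch API: a JSON body naming the operation plus a list of {oid, size} objects, sent with the vnd.git-lfs+json media type. A standalone sketch of the payload and headers (values illustrative):

    import json

    objects = [{'oid': '31d0' + '0' * 60, 'size': 12}]
    payload = json.dumps({'objects': objects, 'operation': 'download'})
    headers = {
        'Accept': 'application/vnd.git-lfs+json',
        'Content-Type': 'application/vnd.git-lfs+json',
    }
    # POST payload to <baseurl>/objects/batch with these headers
    print(payload)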
if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): | if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'): | ||||
return False | return False | ||||
# if remotestore is a null store, downloads will lead to nothing | # if remotestore is a null store, downloads will lead to nothing | ||||
return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | ||||
def uploadblobsfromrevs(repo, revs): | def uploadblobsfromrevs(repo, revs): | ||||
'''upload lfs blobs introduced by revs | """upload lfs blobs introduced by revs | ||||
Note: also used by other extensions, e.g. infinitepush; avoid renaming. | Note: also used by other extensions, e.g. infinitepush; avoid renaming. | ||||
''' | """ | ||||
if _canskipupload(repo): | if _canskipupload(repo): | ||||
return | return | ||||
pointers = extractpointers(repo, revs) | pointers = extractpointers(repo, revs) | ||||
uploadblobs(repo, pointers) | uploadblobs(repo, pointers) | ||||
def prepush(pushop): | def prepush(pushop): | ||||
"""Prepush hook. | """Prepush hook. |
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'mq', b'git', default=b'auto', | b'mq', | ||||
b'git', | |||||
default=b'auto', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'mq', b'keepchanges', default=False, | b'mq', | ||||
b'keepchanges', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'mq', b'plain', default=False, | b'mq', | ||||
b'plain', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'mq', b'secret', default=False, | b'mq', | ||||
b'secret', | |||||
default=False, | |||||
) | ) | ||||
# force load strip extension formerly included in mq and import some utility | # force load strip extension formerly included in mq and import some utility | ||||
try: | try: | ||||
extensions.find(b'strip') | extensions.find(b'strip') | ||||
except KeyError: | except KeyError: | ||||
# note: load is lazy so we could avoid the try-except, | # note: load is lazy so we could avoid the try-except, | ||||
# but I (marmoute) prefer this explicit code. | # but I (marmoute) prefer this explicit code. | ||||
class dummyui(object): | class dummyui(object): | ||||
def debug(self, msg): | def debug(self, msg): | ||||
pass | pass | ||||
def log(self, event, msgfmt, *msgargs, **opts): | def log(self, event, msgfmt, *msgargs, **opts): | ||||
pass | pass | ||||
extensions.load(dummyui(), b'strip', b'') | extensions.load(dummyui(), b'strip', b'') | ||||
strip = strip.strip | strip = strip.strip | ||||
def checksubstate(repo, baserev=None): | def checksubstate(repo, baserev=None): | ||||
'''return list of subrepos at a different revision than substate. | """return list of subrepos at a different revision than substate. | ||||
Abort if any subrepos have uncommitted changes.''' | Abort if any subrepos have uncommitted changes.""" | ||||
inclsubs = [] | inclsubs = [] | ||||
wctx = repo[None] | wctx = repo[None] | ||||
if baserev: | if baserev: | ||||
bctx = repo[baserev] | bctx = repo[baserev] | ||||
else: | else: | ||||
bctx = wctx.p1() | bctx = wctx.p1() | ||||
for s in sorted(wctx.substate): | for s in sorted(wctx.substate): | ||||
wctx.sub(s).bailifchanged(True) | wctx.sub(s).bailifchanged(True) | ||||
s = b'\n'.join(self.comments).rstrip() | s = b'\n'.join(self.comments).rstrip() | ||||
if not s: | if not s: | ||||
return b'' | return b'' | ||||
return s + b'\n\n' | return s + b'\n\n' | ||||
__str__ = encoding.strmethod(__bytes__) | __str__ = encoding.strmethod(__bytes__) | ||||
def _delmsg(self): | def _delmsg(self): | ||||
'''Remove existing message, keeping the rest of the comments fields. | """Remove existing message, keeping the rest of the comments fields. | ||||
If comments contains 'subject: ', message will prepend | If comments contains 'subject: ', message will prepend | ||||
the field and a blank line.''' | the field and a blank line.""" | ||||
if self.message: | if self.message: | ||||
subj = b'subject: ' + self.message[0].lower() | subj = b'subject: ' + self.message[0].lower() | ||||
for i in pycompat.xrange(len(self.comments)): | for i in pycompat.xrange(len(self.comments)): | ||||
if subj == self.comments[i].lower(): | if subj == self.comments[i].lower(): | ||||
del self.comments[i] | del self.comments[i] | ||||
self.message = self.message[2:] | self.message = self.message[2:] | ||||
break | break | ||||
ci = 0 | ci = 0 | ||||
self.applied.append(statusentry(head, patch)) | self.applied.append(statusentry(head, patch)) | ||||
self.applieddirty = True | self.applieddirty = True | ||||
if err: | if err: | ||||
return (err, head) | return (err, head) | ||||
self.savedirty() | self.savedirty() | ||||
return (0, head) | return (0, head) | ||||
def patch(self, repo, patchfile): | def patch(self, repo, patchfile): | ||||
'''Apply patchfile to the working directory. | """Apply patchfile to the working directory. | ||||
patchfile: name of patch file''' | patchfile: name of patch file""" | ||||
files = set() | files = set() | ||||
try: | try: | ||||
fuzz = patchmod.patch( | fuzz = patchmod.patch( | ||||
self.ui, repo, patchfile, strip=1, files=files, eolmode=None | self.ui, repo, patchfile, strip=1, files=files, eolmode=None | ||||
) | ) | ||||
return (True, list(files), fuzz) | return (True, list(files), fuzz) | ||||
except Exception as inst: | except Exception as inst: | ||||
self.ui.note(stringutil.forcebytestr(inst) + b'\n') | self.ui.note(stringutil.forcebytestr(inst) + b'\n') | ||||
return name | return name | ||||
def checkkeepchanges(self, keepchanges, force): | def checkkeepchanges(self, keepchanges, force): | ||||
if force and keepchanges: | if force and keepchanges: | ||||
raise error.Abort(_(b'cannot use both --force and --keep-changes')) | raise error.Abort(_(b'cannot use both --force and --keep-changes')) | ||||
def new(self, repo, patchfn, *pats, **opts): | def new(self, repo, patchfn, *pats, **opts): | ||||
"""options: | """options: | ||||
msg: a string or a no-argument function returning a string | msg: a string or a no-argument function returning a string | ||||
""" | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
msg = opts.get(b'msg') | msg = opts.get(b'msg') | ||||
edit = opts.get(b'edit') | edit = opts.get(b'edit') | ||||
editform = opts.get(b'editform', b'mq.qnew') | editform = opts.get(b'editform', b'mq.qnew') | ||||
user = opts.get(b'user') | user = opts.get(b'user') | ||||
date = opts.get(b'date') | date = opts.get(b'date') | ||||
if date: | if date: | ||||
tobackup=tobackup, | tobackup=tobackup, | ||||
keepchanges=keepchanges, | keepchanges=keepchanges, | ||||
) | ) | ||||
except AbortNoCleanup: | except AbortNoCleanup: | ||||
raise | raise | ||||
except: # re-raises | except: # re-raises | ||||
self.ui.warn(_(b'cleaning up working directory...\n')) | self.ui.warn(_(b'cleaning up working directory...\n')) | ||||
cmdutil.revert( | cmdutil.revert( | ||||
self.ui, repo, repo[b'.'], no_backup=True, | self.ui, | ||||
repo, | |||||
repo[b'.'], | |||||
no_backup=True, | |||||
) | ) | ||||
# only remove unknown files that we know we touched or | # only remove unknown files that we know we touched or | ||||
# created while patching | # created while patching | ||||
for f in all_files: | for f in all_files: | ||||
if f not in repo.dirstate: | if f not in repo.dirstate: | ||||
repo.wvfs.unlinkpath(f, ignoremissing=True) | repo.wvfs.unlinkpath(f, ignoremissing=True) | ||||
self.ui.warn(_(b'done\n')) | self.ui.warn(_(b'done\n')) | ||||
raise | raise | ||||
), | ), | ||||
] | ] | ||||
+ cmdutil.remoteopts, | + cmdutil.remoteopts, | ||||
_(b'hg qclone [OPTION]... SOURCE [DEST]'), | _(b'hg qclone [OPTION]... SOURCE [DEST]'), | ||||
helpcategory=command.CATEGORY_REPO_CREATION, | helpcategory=command.CATEGORY_REPO_CREATION, | ||||
norepo=True, | norepo=True, | ||||
) | ) | ||||
def clone(ui, source, dest=None, **opts): | def clone(ui, source, dest=None, **opts): | ||||
'''clone main and patch repository at same time | """clone main and patch repository at same time | ||||
If source is local, destination will have no patches applied. If | If source is local, destination will have no patches applied. If | ||||
source is remote, this command can not check if patches are | source is remote, this command can not check if patches are | ||||
applied in source, so cannot guarantee that patches are not | applied in source, so cannot guarantee that patches are not | ||||
applied in destination. If you clone a remote repository, make sure | applied in destination. If you clone a remote repository, make sure | ||||
beforehand that it has no patches applied. | beforehand that it has no patches applied. | ||||
Source patch repository is looked for in <src>/.hg/patches by | Source patch repository is looked for in <src>/.hg/patches by | ||||
default. Use -p <url> to change. | default. Use -p <url> to change. | ||||
The patch directory must be a nested Mercurial repository, as | The patch directory must be a nested Mercurial repository, as | ||||
would be created by :hg:`init --mq`. | would be created by :hg:`init --mq`. | ||||
Return 0 on success. | Return 0 on success. | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
def patchdir(repo): | def patchdir(repo): | ||||
"""compute a patch repo url from a repo object""" | """compute a patch repo url from a repo object""" | ||||
url = repo.url() | url = repo.url() | ||||
if url.endswith(b'/'): | if url.endswith(b'/'): | ||||
url = url[:-1] | url = url[:-1] | ||||
return url + b'/.hg/patches' | return url + b'/.hg/patches' | ||||
r = q.qrepo() | r = q.qrepo() | ||||
if not r: | if not r: | ||||
raise error.Abort(b'no queue repository') | raise error.Abort(b'no queue repository') | ||||
commands.commit(r.ui, r, *pats, **opts) | commands.commit(r.ui, r, *pats, **opts) | ||||
@command( | @command( | ||||
b"qseries", | b"qseries", | ||||
[(b'm', b'missing', None, _(b'print patches not in series')),] + seriesopts, | [ | ||||
(b'm', b'missing', None, _(b'print patches not in series')), | |||||
] | |||||
+ seriesopts, | |||||
_(b'hg qseries [-ms]'), | _(b'hg qseries [-ms]'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def series(ui, repo, **opts): | def series(ui, repo, **opts): | ||||
"""print the entire series file | """print the entire series file | ||||
Returns 0 on success.""" | Returns 0 on success.""" | ||||
repo.mq.qseries( | repo.mq.qseries( | ||||
), | ), | ||||
(b'f', b'force', None, _(b'overwrite any local changes')), | (b'f', b'force', None, _(b'overwrite any local changes')), | ||||
(b'', b'no-backup', None, _(b'do not save backup copies of files')), | (b'', b'no-backup', None, _(b'do not save backup copies of files')), | ||||
], | ], | ||||
_(b'hg qgoto [OPTION]... PATCH'), | _(b'hg qgoto [OPTION]... PATCH'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def goto(ui, repo, patch, **opts): | def goto(ui, repo, patch, **opts): | ||||
'''push or pop patches until named patch is at top of stack | """push or pop patches until named patch is at top of stack | ||||
Returns 0 on success.''' | Returns 0 on success.""" | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
opts = fixkeepchangesopts(ui, opts) | opts = fixkeepchangesopts(ui, opts) | ||||
q = repo.mq | q = repo.mq | ||||
patch = q.lookup(patch) | patch = q.lookup(patch) | ||||
nobackup = opts.get(b'no_backup') | nobackup = opts.get(b'no_backup') | ||||
keepchanges = opts.get(b'keep_changes') | keepchanges = opts.get(b'keep_changes') | ||||
if q.isapplied(patch): | if q.isapplied(patch): | ||||
ret = q.pop( | ret = q.pop( | ||||
[ | [ | ||||
(b'l', b'list', None, _(b'list all patches and guards')), | (b'l', b'list', None, _(b'list all patches and guards')), | ||||
(b'n', b'none', None, _(b'drop all guards')), | (b'n', b'none', None, _(b'drop all guards')), | ||||
], | ], | ||||
_(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'), | _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def guard(ui, repo, *args, **opts): | def guard(ui, repo, *args, **opts): | ||||
'''set or print guards for a patch | """set or print guards for a patch | ||||
Guards control whether a patch can be pushed. A patch with no | Guards control whether a patch can be pushed. A patch with no | ||||
guards is always pushed. A patch with a positive guard ("+foo") is | guards is always pushed. A patch with a positive guard ("+foo") is | ||||
pushed only if the :hg:`qselect` command has activated it. A patch with | pushed only if the :hg:`qselect` command has activated it. A patch with | ||||
a negative guard ("-foo") is never pushed if the :hg:`qselect` command | a negative guard ("-foo") is never pushed if the :hg:`qselect` command | ||||
has activated it. | has activated it. | ||||
With no arguments, print the currently active guards. | With no arguments, print the currently active guards. | ||||
With arguments, set guards for the named patch. | With arguments, set guards for the named patch. | ||||
.. note:: | .. note:: | ||||
Specifying negative guards now requires '--'. | Specifying negative guards now requires '--'. | ||||
To set guards on another patch:: | To set guards on another patch:: | ||||
hg qguard other.patch -- +2.6.17 -stable | hg qguard other.patch -- +2.6.17 -stable | ||||
Returns 0 on success. | Returns 0 on success. | ||||
''' | """ | ||||
def status(idx): | def status(idx): | ||||
guards = q.seriesguards[idx] or [b'unguarded'] | guards = q.seriesguards[idx] or [b'unguarded'] | ||||
if q.series[idx] in applied: | if q.series[idx] in applied: | ||||
state = b'applied' | state = b'applied' | ||||
elif q.pushable(idx)[0]: | elif q.pushable(idx)[0]: | ||||
state = b'unapplied' | state = b'unapplied' | ||||
else: | else: | ||||
(b's', b'series', None, _(b'list all guards in series file')), | (b's', b'series', None, _(b'list all guards in series file')), | ||||
(b'', b'pop', None, _(b'pop to before first guarded applied patch')), | (b'', b'pop', None, _(b'pop to before first guarded applied patch')), | ||||
(b'', b'reapply', None, _(b'pop, then reapply patches')), | (b'', b'reapply', None, _(b'pop, then reapply patches')), | ||||
], | ], | ||||
_(b'hg qselect [OPTION]... [GUARD]...'), | _(b'hg qselect [OPTION]... [GUARD]...'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def select(ui, repo, *args, **opts): | def select(ui, repo, *args, **opts): | ||||
'''set or print guarded patches to push | """set or print guarded patches to push | ||||
Use the :hg:`qguard` command to set or print guards on patch, then use | Use the :hg:`qguard` command to set or print guards on patch, then use | ||||
qselect to tell mq which guards to use. A patch will be pushed if | qselect to tell mq which guards to use. A patch will be pushed if | ||||
it has no guards or any positive guards match the currently | it has no guards or any positive guards match the currently | ||||
selected guard, but will not be pushed if any negative guards | selected guard, but will not be pushed if any negative guards | ||||
match the current guard. For example:: | match the current guard. For example:: | ||||
qguard foo.patch -- -stable (negative guard) | qguard foo.patch -- -stable (negative guard) | ||||
guarded patches by default. Use --pop to pop back to the last | guarded patches by default. Use --pop to pop back to the last | ||||
applied patch that is not guarded. Use --reapply (which implies | applied patch that is not guarded. Use --reapply (which implies | ||||
--pop) to push back to the current patch afterwards, but skip | --pop) to push back to the current patch afterwards, but skip | ||||
guarded patches. | guarded patches. | ||||
Use -s/--series to print a list of all guards in the series file | Use -s/--series to print a list of all guards in the series file | ||||
(no other arguments needed). Use -v for more information. | (no other arguments needed). Use -v for more information. | ||||
Returns 0 on success.''' | Returns 0 on success.""" | ||||
q = repo.mq | q = repo.mq | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
guards = q.active() | guards = q.active() | ||||
pushable = lambda i: q.pushable(q.applied[i].name)[0] | pushable = lambda i: q.pushable(q.applied[i].name)[0] | ||||
if args or opts.get(b'none'): | if args or opts.get(b'none'): | ||||
old_unapplied = q.unapplied(repo) | old_unapplied = q.unapplied(repo) | ||||
old_guarded = [ | old_guarded = [ | ||||
(b'', b'rename', False, _(b'rename active queue')), | (b'', b'rename', False, _(b'rename active queue')), | ||||
(b'', b'delete', False, _(b'delete reference to queue')), | (b'', b'delete', False, _(b'delete reference to queue')), | ||||
(b'', b'purge', False, _(b'delete queue, and remove patch dir')), | (b'', b'purge', False, _(b'delete queue, and remove patch dir')), | ||||
], | ], | ||||
_(b'[OPTION] [QUEUE]'), | _(b'[OPTION] [QUEUE]'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def qqueue(ui, repo, name=None, **opts): | def qqueue(ui, repo, name=None, **opts): | ||||
'''manage multiple patch queues | """manage multiple patch queues | ||||
Supports switching between different patch queues, as well as creating | Supports switching between different patch queues, as well as creating | ||||
new patch queues and deleting existing ones. | new patch queues and deleting existing ones. | ||||
Omitting a queue name or specifying -l/--list will show you the registered | Omitting a queue name or specifying -l/--list will show you the registered | ||||
queues - by default the "normal" patches queue is registered. The currently | queues - by default the "normal" patches queue is registered. The currently | ||||
active queue will be marked with "(active)". Specifying --active will print | active queue will be marked with "(active)". Specifying --active will print | ||||
only the name of the active queue. | only the name of the active queue. | ||||
To create a new queue, use -c/--create. The queue is automatically made | To create a new queue, use -c/--create. The queue is automatically made | ||||
active, except in the case where there are applied patches from the | active, except in the case where there are applied patches from the | ||||
currently active queue in the repository. Then the queue will only be | currently active queue in the repository. Then the queue will only be | ||||
created and switching will fail. | created and switching will fail. | ||||
To delete an existing queue, use --delete. You cannot delete the currently | To delete an existing queue, use --delete. You cannot delete the currently | ||||
active queue. | active queue. | ||||
Returns 0 on success. | Returns 0 on success. | ||||
''' | """ | ||||
q = repo.mq | q = repo.mq | ||||
_defaultqueue = b'patches' | _defaultqueue = b'patches' | ||||
_allqueues = b'patches.queues' | _allqueues = b'patches.queues' | ||||
_activequeue = b'patches.queue' | _activequeue = b'patches.queue' | ||||
def _getcurrent(): | def _getcurrent(): | ||||
cur = os.path.basename(q.path) | cur = os.path.basename(q.path) | ||||
if cur.startswith(b'patches-'): | if cur.startswith(b'patches-'): | ||||
ui.note(_(b"mq: (empty queue)\n")) | ui.note(_(b"mq: (empty queue)\n")) | ||||
revsetpredicate = registrar.revsetpredicate() | revsetpredicate = registrar.revsetpredicate() | ||||
@revsetpredicate(b'mq()') | @revsetpredicate(b'mq()') | ||||
def revsetmq(repo, subset, x): | def revsetmq(repo, subset, x): | ||||
"""Changesets managed by MQ. | """Changesets managed by MQ.""" | ||||
""" | |||||
revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments")) | revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments")) | ||||
applied = {repo[r.node].rev() for r in repo.mq.applied} | applied = {repo[r.node].rev() for r in repo.mq.applied} | ||||
return smartset.baseset([r for r in subset if r in applied]) | return smartset.baseset([r for r in subset if r in applied]) | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = [revsetmq] | i18nfunctions = [revsetmq] | ||||
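revsetmq is plain set filtering: collect the revisions of applied mq patches, then keep only the subset members found there. The same shape in plain Python, with illustrative revision numbers:

    applied = {3, 5, 8}      # revisions of applied mq patches (illustrative)
    subset = range(0, 10)    # candidate revisions
    assert [r for r in subset if r in applied] == [3, 5, 8]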
version, | version, | ||||
common, | common, | ||||
heads, | heads, | ||||
kwargs.get('depth', None), | kwargs.get('depth', None), | ||||
) | ) | ||||
def generateellipsesbundle2( | def generateellipsesbundle2( | ||||
bundler, repo, include, exclude, version, common, heads, depth, | bundler, | ||||
repo, | |||||
include, | |||||
exclude, | |||||
version, | |||||
common, | |||||
heads, | |||||
depth, | |||||
): | ): | ||||
match = narrowspec.match(repo.root, include=include, exclude=exclude) | match = narrowspec.match(repo.root, include=include, exclude=exclude) | ||||
if depth is not None: | if depth is not None: | ||||
depth = int(depth) | depth = int(depth) | ||||
if depth < 1: | if depth < 1: | ||||
raise error.Abort(_(b'depth must be positive, got %d') % depth) | raise error.Abort(_(b'depth must be positive, got %d') % depth) | ||||
heads = set(heads or repo.heads()) | heads = set(heads or repo.heads()) | ||||
part = bundler.newpart(b'changegroup', data=cgdata) | part = bundler.newpart(b'changegroup', data=cgdata) | ||||
part.addparam(b'version', version) | part.addparam(b'version', version) | ||||
if scmutil.istreemanifest(repo): | if scmutil.istreemanifest(repo): | ||||
part.addparam(b'treemanifest', b'1') | part.addparam(b'treemanifest', b'1') | ||||
def generate_ellipses_bundle2_for_widening( | def generate_ellipses_bundle2_for_widening( | ||||
bundler, repo, oldmatch, newmatch, version, common, known, | bundler, | ||||
repo, | |||||
oldmatch, | |||||
newmatch, | |||||
version, | |||||
common, | |||||
known, | |||||
): | ): | ||||
common = set(common or [nullid]) | common = set(common or [nullid]) | ||||
# Steps: | # Steps: | ||||
# 1. Send kill for "$known & ::common" | # 1. Send kill for "$known & ::common" | ||||
# | # | ||||
# 2. Send changegroup for ::common | # 2. Send changegroup for ::common | ||||
# | # | ||||
# 3. Proceed. | # 3. Proceed. |
newmatch, | newmatch, | ||||
common, | common, | ||||
known, | known, | ||||
cgversion, | cgversion, | ||||
ellipses, | ellipses, | ||||
) | ) | ||||
else: | else: | ||||
narrowbundle2.generate_ellipses_bundle2_for_widening( | narrowbundle2.generate_ellipses_bundle2_for_widening( | ||||
bundler, repo, oldmatch, newmatch, cgversion, common, known, | bundler, | ||||
repo, | |||||
oldmatch, | |||||
newmatch, | |||||
cgversion, | |||||
common, | |||||
known, | |||||
) | ) | ||||
except error.Abort as exc: | except error.Abort as exc: | ||||
bundler = bundle2.bundle20(repo.ui) | bundler = bundle2.bundle20(repo.ui) | ||||
manargs = [(b'message', exc.message)] | manargs = [(b'message', exc.message)] | ||||
advargs = [] | advargs = [] | ||||
if exc.hint is not None: | if exc.hint is not None: | ||||
advargs.append((b'hint', exc.hint)) | advargs.append((b'hint', exc.hint)) | ||||
bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs)) | bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs)) |
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'notify', b'changegroup', default=None, | b'notify', | ||||
b'changegroup', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'config', default=None, | b'notify', | ||||
b'config', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'diffstat', default=True, | b'notify', | ||||
b'diffstat', | |||||
default=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'domain', default=None, | b'notify', | ||||
b'domain', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'messageidseed', default=None, | b'notify', | ||||
b'messageidseed', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'fromauthor', default=None, | b'notify', | ||||
b'fromauthor', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'incoming', default=None, | b'notify', | ||||
b'incoming', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'maxdiff', default=300, | b'notify', | ||||
b'maxdiff', | |||||
default=300, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'maxdiffstat', default=-1, | b'notify', | ||||
b'maxdiffstat', | |||||
default=-1, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'maxsubject', default=67, | b'notify', | ||||
b'maxsubject', | |||||
default=67, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'mbox', default=None, | b'notify', | ||||
b'mbox', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'merge', default=True, | b'notify', | ||||
b'merge', | |||||
default=True, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'outgoing', default=None, | b'notify', | ||||
b'outgoing', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'reply-to-predecessor', default=False, | b'notify', | ||||
b'reply-to-predecessor', | |||||
default=False, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'sources', default=b'serve', | b'notify', | ||||
b'sources', | |||||
default=b'serve', | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'showfunc', default=None, | b'notify', | ||||
b'showfunc', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'strip', default=0, | b'notify', | ||||
b'strip', | |||||
default=0, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'style', default=None, | b'notify', | ||||
b'style', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'template', default=None, | b'notify', | ||||
b'template', | |||||
default=None, | |||||
) | ) | ||||
configitem( | configitem( | ||||
b'notify', b'test', default=True, | b'notify', | ||||
b'test', | |||||
default=True, | |||||
) | ) | ||||
# template for single changeset can include email headers. | # template for single changeset can include email headers. | ||||
single_template = b''' | single_template = b''' | ||||
Subject: changeset in {webroot}: {desc|firstline|strip} | Subject: changeset in {webroot}: {desc|firstline|strip} | ||||
From: {author} | From: {author} | ||||
changeset {node|short} in {root} | changeset {node|short} in {root} | ||||
difflines = difflines[:maxdiff] | difflines = difflines[:maxdiff] | ||||
elif difflines: | elif difflines: | ||||
self.ui.write(_(b'\ndiffs (%d lines):\n\n') % len(difflines)) | self.ui.write(_(b'\ndiffs (%d lines):\n\n') % len(difflines)) | ||||
self.ui.write(b"\n".join(difflines)) | self.ui.write(b"\n".join(difflines)) | ||||
def hook(ui, repo, hooktype, node=None, source=None, **kwargs): | def hook(ui, repo, hooktype, node=None, source=None, **kwargs): | ||||
'''send email notifications to interested subscribers. | """send email notifications to interested subscribers. | ||||
if used as changegroup hook, send one email for all changesets in | if used as changegroup hook, send one email for all changesets in | ||||
changegroup. else send one email per changeset.''' | changegroup. else send one email per changeset.""" | ||||
n = notifier(ui, repo, hooktype) | n = notifier(ui, repo, hooktype) | ||||
ctx = repo.unfiltered()[node] | ctx = repo.unfiltered()[node] | ||||
if not n.subs: | if not n.subs: | ||||
ui.debug(b'notify: no subscribers to repository %s\n' % n.root) | ui.debug(b'notify: no subscribers to repository %s\n' % n.root) | ||||
return | return | ||||
if n.skipsource(source): | if n.skipsource(source): |
# be specifying the version(s) of Mercurial they are tested with, or | # be specifying the version(s) of Mercurial they are tested with, or | ||||
# leave the attribute unspecified. | # leave the attribute unspecified. | ||||
testedwith = b'ships-with-hg-core' | testedwith = b'ships-with-hg-core' | ||||
configtable = {} | configtable = {} | ||||
configitem = registrar.configitem(configtable) | configitem = registrar.configitem(configtable) | ||||
configitem( | configitem( | ||||
b'pager', b'attend', default=lambda: attended, | b'pager', | ||||
b'attend', | |||||
default=lambda: attended, | |||||
) | ) | ||||
def uisetup(ui): | def uisetup(ui): | ||||
def pagecmd(orig, ui, options, cmd, cmdfunc): | def pagecmd(orig, ui, options, cmd, cmdfunc): | ||||
auto = options[b'pager'] == b'auto' | auto = options[b'pager'] == b'auto' | ||||
if auto and not ui.pageractive: | if auto and not ui.pageractive: | ||||
usepager = False | usepager = False |
cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'patchbomb', b'bundletype', default=None,
+    b'patchbomb',
+    b'bundletype',
+    default=None,
)
configitem(
-    b'patchbomb', b'bcc', default=None,
+    b'patchbomb',
+    b'bcc',
+    default=None,
)
configitem(
-    b'patchbomb', b'cc', default=None,
+    b'patchbomb',
+    b'cc',
+    default=None,
)
configitem(
-    b'patchbomb', b'confirm', default=False,
+    b'patchbomb',
+    b'confirm',
+    default=False,
)
configitem(
-    b'patchbomb', b'flagtemplate', default=None,
+    b'patchbomb',
+    b'flagtemplate',
+    default=None,
)
configitem(
-    b'patchbomb', b'from', default=None,
+    b'patchbomb',
+    b'from',
+    default=None,
)
configitem(
-    b'patchbomb', b'intro', default=b'auto',
+    b'patchbomb',
+    b'intro',
+    default=b'auto',
)
configitem(
-    b'patchbomb', b'publicurl', default=None,
+    b'patchbomb',
+    b'publicurl',
+    default=None,
)
configitem(
-    b'patchbomb', b'reply-to', default=None,
+    b'patchbomb',
+    b'reply-to',
+    default=None,
)
configitem(
-    b'patchbomb', b'to', default=None,
+    b'patchbomb',
+    b'to',
+    default=None,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

def _addpullheader(seq, ctx):
-    """Add a header pointing to a public URL where the changeset is available
-    """
+    """Add a header pointing to a public URL where the changeset is available"""
    repo = ctx.repo()
    # experimental config: patchbomb.publicurl
    # waiting for some logic that check that the changeset are available on the
    # destination before patchbombing anything.
    publicurl = repo.ui.config(b'patchbomb', b'publicurl')
    if publicurl:
        return b'Available At %s\n# hg pull %s -r %s' % (
            publicurl,

        ),
    ]
    + emailopts
    + cmdutil.remoteopts,
    _(b'hg email [OPTION]... [DEST]...'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def email(ui, repo, *revs, **opts):
-    '''send changesets by email
+    """send changesets by email

    By default, diffs are sent in the format generated by
    :hg:`export`, one per message. The series starts with a "[PATCH 0
    of N]" introduction, which describes the series as a whole.

    Each patch email has a Subject line of "[PATCH M of N] ...", using
    the first line of the changeset description as the subject text.
    The message contains two or three parts. First, the changeset

      hg email -o -m mbox &&          # generate an mbox file...
      mutt -R -f mbox                 # ... and view it with mutt

      hg email -o -m mbox &&          # generate an mbox file ...
      formail -s sendmail \\          # ... and use formail to send from the mbox
        -bm -t < mbox                 # ... using sendmail

    Before using this command, you will need to enable email in your
    hgrc. See the [email] section in hgrc(5) for details.
-    '''
+    """
    opts = pycompat.byteskwargs(opts)

    _charsets = mail._charsets(ui)

    bundle = opts.get(b'bundle')
    date = opts.get(b'date')
    mbox = opts.get(b'mbox')
    outgoing = opts.get(b'outgoing')
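A side note for readers unfamiliar with the registrar API: each configitem() call above registers a default value, and ui.config() falls back to that default whenever the user's hgrc leaves the item unset. A minimal sketch, assuming a repo object as in the surrounding code:

# Registered defaults are what ui.config() returns for unset items.
publicurl = repo.ui.config(b'patchbomb', b'publicurl')  # None unless configured
intro = repo.ui.config(b'patchbomb', b'intro')          # b'auto' by default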
cmdtable = eh.cmdtable
command = eh.command
configtable = eh.configtable
templatekeyword = eh.templatekeyword
uisetup = eh.finaluisetup

# developer config: phabricator.batchsize
eh.configitem(
-    b'phabricator', b'batchsize', default=12,
+    b'phabricator',
+    b'batchsize',
+    default=12,
)
eh.configitem(
-    b'phabricator', b'callsign', default=None,
+    b'phabricator',
+    b'callsign',
+    default=None,
)
eh.configitem(
-    b'phabricator', b'curlcmd', default=None,
+    b'phabricator',
+    b'curlcmd',
+    default=None,
)
# developer config: phabricator.debug
eh.configitem(
-    b'phabricator', b'debug', default=False,
+    b'phabricator',
+    b'debug',
+    default=False,
)
# developer config: phabricator.repophid
eh.configitem(
-    b'phabricator', b'repophid', default=None,
+    b'phabricator',
+    b'repophid',
+    default=None,
)
eh.configitem(
-    b'phabricator', b'url', default=None,
+    b'phabricator',
+    b'url',
+    default=None,
)
eh.configitem(
-    b'phabsend', b'confirm', default=False,
+    b'phabsend',
+    b'confirm',
+    default=False,
)
eh.configitem(
-    b'phabimport', b'secret', default=False,
+    b'phabimport',
+    b'secret',
+    default=False,
)
eh.configitem(
-    b'phabimport', b'obsolete', default=False,
+    b'phabimport',
+    b'obsolete',
+    default=False,
)

colortable = {
    b'phabricator.action.created': b'green',
    b'phabricator.action.skipped': b'magenta',
    b'phabricator.action.updated': b'magenta',
    b'phabricator.drev': b'bold',
    b'phabricator.status.abandoned': b'magenta dim',

            b' (ADVANCED)'
        ),
    ),
]

@eh.wrapfunction(localrepo, "loadhgrc")
def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
-    """Load ``.arcconfig`` content into a ui instance on repository open.
-    """
+    """Load ``.arcconfig`` content into a ui instance on repository open."""
    result = False
    arcconfig = {}

    try:
        # json.loads only accepts bytes from 3.6+
        rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
        # json.loads only returns unicode strings
        arcconfig = pycompat.rapply(

class DiffFileType(object):
    TEXT = 1
    IMAGE = 2
    BINARY = 3

@attr.s
class phabhunk(dict):
-    """Represents a Differential hunk, which is owned by a Differential change
-    """
+    """Represents a Differential hunk, which is owned by a Differential change"""

    oldOffset = attr.ib(default=0)  # camelcase-required
    oldLength = attr.ib(default=0)  # camelcase-required
    newOffset = attr.ib(default=0)  # camelcase-required
    newLength = attr.ib(default=0)  # camelcase-required
    corpus = attr.ib(default='')
    # These get added to the phabchange's equivalents
    addLines = attr.ib(default=0)  # camelcase-required

                % scmutil.formatchangeid(old)
            )
            continue
        parents = [
            mapping.get(old.p1().node(), (old.p1(),))[0],
            mapping.get(old.p2().node(), (old.p2(),))[0],
        ]
        newdesc = rewriteutil.update_hash_refs(
-            repo, newdesc, mapping,
+            repo,
+            newdesc,
+            mapping,
        )
        new = context.metadataonlyctx(
            repo,
            old,
            parents=parents,
            text=newdesc,
            user=old.user(),
            date=old.date(),

def template_review(context, mapping):
    """:phabreview: Object describing the review for this changeset.
    Has attributes `url` and `id`.
    """
    ctx = context.resource(mapping, b'ctx')
    m = _differentialrevisiondescre.search(ctx.description())
    if m:
        return templateutil.hybriddict(
-            {b'url': m.group('url'), b'id': b"D%s" % m.group('id'),}
+            {
+                b'url': m.group('url'),
+                b'id': b"D%s" % m.group('id'),
+            }
        )
    else:
        tags = ctx.repo().nodetags(ctx.node())
        for t in tags:
            if _differentialrevisiontagre.match(t):
                url = ctx.repo().ui.config(b'phabricator', b'url')
                if not url.endswith(b'/'):
                    url += b'/'
                url += t
-                return templateutil.hybriddict({b'url': url, b'id': t,})
+                return templateutil.hybriddict(
+                    {
+                        b'url': url,
+                        b'id': t,
+                    }
+                )
    return None

@eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
def template_status(context, mapping):
-    """:phabstatus: String. Status of Phabricator differential.
-    """
+    """:phabstatus: String. Status of Phabricator differential."""
    ctx = context.resource(mapping, b'ctx')
    repo = context.resource(mapping, b'repo')
    ui = context.resource(mapping, b'ui')

    rev = ctx.rev()
    try:
        drevid = getdrevmap(repo, [rev])[rev]
    except KeyError:
        return None
    drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
    for drev in drevs:
        if int(drev[b'id']) == drevid:
            return templateutil.hybriddict(
-                {b'url': drev[b'uri'], b'status': drev[b'statusName'],}
+                {
+                    b'url': drev[b'uri'],
+                    b'status': drev[b'statusName'],
+                }
            )
    return None

@show.showview(b'phabstatus', csettopic=b'work')
def phabstatusshowview(ui, repo, displayer):
    """Phabricator differiential status"""
    revs = repo.revs('sort(_underway(), topo)')
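template_review() above relies on _differentialrevisiondescre matching the "Differential Revision" trailer that phabsend appends to commit messages. A hedged approximation of that match, for illustration only (the real pattern lives in phabricator.py):

import re

_descre = re.compile(
    r'^Differential Revision:\s*(?P<url>(?:.*)/D(?P<id>[1-9][0-9]*))$',
    re.MULTILINE,
)

desc = 'phabricator: example\n\nDifferential Revision: https://phab.example.test/D123'
m = _descre.search(desc)
if m:
    # -> {'url': 'https://phab.example.test/D123', 'id': 'D123'}
    print({'url': m.group('url'), 'id': 'D%s' % m.group('id')})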
            ),
        ),
    ]
    + cmdutil.walkopts,
    _(b'hg purge [OPTION]... [DIR]...'),
    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
)
def purge(ui, repo, *dirs, **opts):
-    '''removes files not tracked by Mercurial
+    """removes files not tracked by Mercurial

    Delete files not known to Mercurial. This is useful to test local
    and uncommitted changes in an otherwise-clean source tree.

    This means that purge will delete the following by default:

    - Unknown files: files marked with "?" by :hg:`status`
    - Empty directories: in fact Mercurial ignores directories unless

    If directories are given on the command line, only files in these
    directories are considered.

    Be careful with purge, as you could irreversibly delete some files
    you forgot to add to the repository. If you only want to print the
    list of files that this program would delete, use the --print
    option.
-    '''
+    """
    opts = pycompat.byteskwargs(opts)
    cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')

    act = not opts.get(b'print')
    eol = b'\n'
    if opts.get(b'print0'):
        eol = b'\0'
        act = False  # --print0 implies --print
            self.obsoletewithoutsuccessorindestination,
        )

        for rev in sortedrevs:
            self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))

    def _concludenode(self, rev, editor, commitmsg=None):
-        '''Commit the wd changes with parents p1 and p2.
+        """Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
-        Return node of committed revision.'''
+        Return node of committed revision."""
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()

        # Skip replacement if collapsing, as that degenerates to p1 for all
        # nodes.
        if not self.collapsef:

        _(b'starting dry-run rebase; repository will not be changed\n')
    )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
-                    ui, repo, action, opts, rbsrt,
+                    ui,
+                    repo,
+                    action,
+                    opts,
+                    rbsrt,
                )
        except error.ConflictResolutionRequired:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:

                b'unable to collapse on top of %d, there is more '
                b'than one external parent: %s'
            )
            % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
        )

def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
-    '''Commit the memory changes with parents p1 and p2.
+    """Commit the memory changes with parents p1 and p2.

-    Return node of committed revision.'''
+    Return node of committed revision."""
    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = None
    if b'branch' in extra:
        branch = extra[b'branch']

    # FIXME: We call _compact() because it's required to correctly detect
    # changed files. This was added to fix a regression shortly before the 5.5

    if memctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
        return None
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres

def commitnode(repo, editor, extra, user, date, commitmsg):
-    '''Commit the wd changes with parents p1 and p2.
+    """Commit the wd changes with parents p1 and p2.

-    Return node of committed revision.'''
+    Return node of committed revision."""
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

        result.append(r)
    if not result:
        raise error.Abort(_(b'source and destination form a cycle'))
    srcset -= set(result)
    yield result

def buildstate(repo, destmap, collapse):
-    '''Define which revisions are going to be rebased and where
+    """Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
-    '''
+    """
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
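For context, the destmap that buildstate() receives is a plain mapping from each source revision to the revision it should land on. A tiny illustrative sketch (revision numbers invented):

# Rebase r12 and r13 onto r10; buildstate() walks this mapping to decide
# the rebase order and to detect cycles (see the error above).
destmap = {
    12: 10,
    13: 10,
}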
b"record", | b"record", | ||||
# same options as commit + white space diff options | # same options as commit + white space diff options | ||||
[c for c in commands.table[b'commit|ci'][1][:] if c[1] != b"interactive"] | [c for c in commands.table[b'commit|ci'][1][:] if c[1] != b"interactive"] | ||||
+ cmdutil.diffwsopts, | + cmdutil.diffwsopts, | ||||
_(b'hg record [OPTION]... [FILE]...'), | _(b'hg record [OPTION]... [FILE]...'), | ||||
helpcategory=command.CATEGORY_COMMITTING, | helpcategory=command.CATEGORY_COMMITTING, | ||||
) | ) | ||||
def record(ui, repo, *pats, **opts): | def record(ui, repo, *pats, **opts): | ||||
'''interactively select changes to commit | """interactively select changes to commit | ||||
If a list of files is omitted, all changes reported by :hg:`status` | If a list of files is omitted, all changes reported by :hg:`status` | ||||
will be candidates for recording. | will be candidates for recording. | ||||
See :hg:`help dates` for a list of formats valid for -d/--date. | See :hg:`help dates` for a list of formats valid for -d/--date. | ||||
If using the text interface (see :hg:`help config`), | If using the text interface (see :hg:`help config`), | ||||
you will be prompted for whether to record changes to each | you will be prompted for whether to record changes to each | ||||
f - record remaining changes to this file | f - record remaining changes to this file | ||||
d - done, skip remaining changes and files | d - done, skip remaining changes and files | ||||
a - record all changes to all remaining files | a - record all changes to all remaining files | ||||
q - quit, recording no changes | q - quit, recording no changes | ||||
? - display help | ? - display help | ||||
This command is not available when committing a merge.''' | This command is not available when committing a merge.""" | ||||
if not ui.interactive(): | if not ui.interactive(): | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'running non-interactively, use %s instead') % b'commit' | _(b'running non-interactively, use %s instead') % b'commit' | ||||
) | ) | ||||
opts["interactive"] = True | opts["interactive"] = True | ||||
overrides = {(b'experimental', b'crecord'): False} | overrides = {(b'experimental', b'crecord'): False} | ||||
@command( | @command( | ||||
b'qrecord', | b'qrecord', | ||||
[], | [], | ||||
_(b'hg qrecord [OPTION]... PATCH [FILE]...'), | _(b'hg qrecord [OPTION]... PATCH [FILE]...'), | ||||
helpcategory=command.CATEGORY_COMMITTING, | helpcategory=command.CATEGORY_COMMITTING, | ||||
inferrepo=True, | inferrepo=True, | ||||
) | ) | ||||
def qrecord(ui, repo, patch, *pats, **opts): | def qrecord(ui, repo, patch, *pats, **opts): | ||||
'''interactively record a new patch | """interactively record a new patch | ||||
See :hg:`help qnew` & :hg:`help record` for more information and | See :hg:`help qnew` & :hg:`help record` for more information and | ||||
usage. | usage. | ||||
''' | """ | ||||
return _qrecord(b'qnew', ui, repo, patch, *pats, **opts) | return _qrecord(b'qnew', ui, repo, patch, *pats, **opts) | ||||
def _qrecord(cmdsuggest, ui, repo, patch, *pats, **opts): | def _qrecord(cmdsuggest, ui, repo, patch, *pats, **opts): | ||||
try: | try: | ||||
mq = extensions.find(b'mq') | mq = extensions.find(b'mq') | ||||
except KeyError: | except KeyError: | ||||
raise error.Abort(_(b"'mq' extension not loaded")) | raise error.Abort(_(b"'mq' extension not loaded")) |
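The overrides dict at the end of record() feeds ui.configoverride(), which temporarily pins a config value for the duration of a with-block. A minimal sketch of the pattern, assuming ui, repo, pats and opts are in scope as above:

# Temporarily disable the curses record UI, then delegate to commit.
with ui.configoverride({(b'experimental', b'crecord'): False}, b'record'):
    commands.commit(ui, repo, *pats, **opts)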
def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
-        '''looks up all renames for a file (up to endrev) the first
+        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.

-        Returns rename info for fn at changerev rev.'''
+        Returns rename info for fn at changerev rev."""
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()

    for actx in fctx.ancestors():
        s.add(actx.linkrev())
    return smartset.baseset([r for r in subset if r in s])

@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
-    '''garbage collect the client and server filelog caches
-    '''
+    """garbage collect the client and server filelog caches"""
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache

    for path in ctx.walk(match):
        if (not sparsematch or sparsematch(path)) and path in mf:
            allfiles.append((path, hex(mf[path])))
    repo.fileservice.prefetch(allfiles)

@command(
    b'debugremotefilelog',
-    [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
+    [
+        (b'd', b'decompress', None, _(b'decompress the filelog first')),
+    ],
    _(b'hg debugremotefilelog <path>'),
    norepo=True,
)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command(
    b'verifyremotefilelog',
-    [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
+    [
+        (b'd', b'decompress', None, _(b'decompress the filelogs first')),
+    ],
    _(b'hg verifyremotefilelogs <directory>'),
    norepo=True,
)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command(
        # Clean up the repo cache directory.
        self._cleanupdirectory(self._getrepocachepath())

    # BELOW THIS ARE NON-STANDARD APIS

    def _cleanupdirectory(self, rootdir):
        """Removes the empty directories and unnecessary files within the root
        directory recursively. Note that this method does not remove the root
-        directory itself. """
+        directory itself."""
        oldfiles = set()
        otherfiles = set()
        # osutil.listdir returns stat information which saves some rmdir/listdir
        # syscalls.
        for name, mode in util.osutil.listdir(rootdir):
            if stat.S_ISDIR(mode):
                dirpath = os.path.join(rootdir, name)
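The comment above points out that osutil.listdir() hands back stat information along with each name, saving per-entry stat syscalls. The same idea with only the standard library, as a hedged sketch of what _cleanupdirectory() does at its core:

import os

def cleanupdirectory(rootdir):
    # os.scandir() also exposes the entry type without an extra stat call.
    for entry in os.scandir(rootdir):
        if entry.is_dir(follow_symlinks=False):
            cleanupdirectory(entry.path)
            # Only remove the directory if it is now empty.
            try:
                os.rmdir(entry.path)
            except OSError:
                pass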
from . import (
    basestore,
    constants,
    shallowutil,
)

class ChainIndicies(object):
-    """A static class for easy reference to the delta chain indicies.
-    """
+    """A static class for easy reference to the delta chain indicies."""

    # The filename of this revision delta
    NAME = 0
    # The mercurial file node for this revision delta
    NODE = 1
    # The filename of the delta base's revision. This is useful when delta
    # between different files (like in the case of a move or copy, we can delta
    # against the original file content).

        while chain:
            delta = chain.pop()[ChainIndicies.DATA]
            text = mdiff.patches(text, [delta])
        return text

    @basestore.baseunionstore.retriable
    def getdelta(self, name, node):
-        """Return the single delta entry for the given name/node pair.
-        """
+        """Return the single delta entry for the given name/node pair."""
        for store in self.stores:
            try:
                return store.getdelta(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))
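The while-loop above is the delta-chain resolution step: start from the full text at the bottom of the chain and apply each delta on top with mdiff.patches(). A hedged sketch of the same idea in isolation (assumes a Mercurial checkout on the path; deltas are in the bdiff patch format the store hands back):

from mercurial import mdiff

def resolvechain(fulltext, deltas):
    # Apply each delta on top of the accumulated text, base first.
    text = fulltext
    for delta in deltas:
        text = mdiff.patches(text, [delta])
    return text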
        writerthread.join()
        # End the command
        pipeo.write(b'\n')
        pipeo.flush()

class fileserverclient(object):
-    """A client for requesting files from the remote file server.
-    """
+    """A client for requesting files from the remote file server."""

    def __init__(self, repo):
        ui = repo.ui
        self.repo = repo
        self.ui = ui
        self.cacheprocess = ui.config(b"remotefilelog", b"cacheprocess")
        if self.cacheprocess:
            self.cacheprocess = util.expandpath(self.cacheprocess)

        )
        if self.remotecache.connected:
            self.remotecache.close()

    def prefetch(
        self, fileids, force=False, fetchdata=True, fetchhistory=False
    ):
-        """downloads the given file versions to the cache
-        """
+        """downloads the given file versions to the cache"""
        repo = self.repo
        idstocheck = []
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if (
            linknode = self._adjustlinknode(
                self._path, self._filelog, self._filenode, self._descendantrev
            )
            return self._repo.unfiltered().changelog.rev(linknode)
        else:
            return self.linkrev()

    def filectx(self, fileid, changeid=None):
-        '''opens an arbitrary revision of the file without
-        opening a new filelog'''
+        """opens an arbitrary revision of the file without
+        opening a new filelog"""
        return remotefilectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    constants,
    shallowutil,
)

_sshv1server = wireprotoserver.sshv1protocolhandler

def setupserver(ui, repo):
-    """Sets up a normal Mercurial repo so it can serve files to shallow repos.
-    """
+    """Sets up a normal Mercurial repo so it can serve files to shallow repos."""
    onetimesetup(ui)

    # don't send files to shallow clients during pulls
    def generatefiles(
        orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
    ):
        caps = self._bundlecaps or []
        if constants.BUNDLE2_CAPABLITY in caps:

        changegroup.cgpacker, b'generatefiles', generatefiles
    )

onetime = False

def onetimesetup(ui):
-    """Configures the wireprotocol for both clients and servers.
-    """
+    """Configures the wireprotocol for both clients and servers."""
    global onetime
    if onetime:
        return
    onetime = True

    # support file content requests
    wireprotov1server.wireprotocommand(
        b'x_rfl_getflogheads', b'path', permission=b'pull'

            os.umask(oldumask)
    else:
        with open(filecachepath, b"rb") as f:
            text = f.read()

    return text

def getflogheads(repo, proto, path):
-    """A server api for requesting a filelog's heads
-    """
+    """A server api for requesting a filelog's heads"""
    flog = repo.file(path)
    heads = flog.heads()
    return b'\n'.join((hex(head) for head in heads if head != nullid))

def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:

    cachepath = os.path.join(repo.path, b"remotefilelogcache")
    node = bin(node.strip())
    if node == nullid:
        return b'0\0'
    return b'0\0' + _loadfileblob(repo, cachepath, file, node)

def getfiles(repo, proto):
-    """A server api for requesting particular versions of particular files.
-    """
+    """A server api for requesting particular versions of particular files."""
    if shallowutil.isenabled(repo):
        raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
    if not isinstance(proto, _sshv1server):
        raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))

    def streamer():
        fin = proto._fin

    kwargs = {}
    if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'):
        kwargs['record_wait'] = repo.ui.atexit

    procutil.runbgcommand(cmd, encoding.environ, ensurestart=False, **kwargs)

def fullrepack(repo, options=None):
-    """If ``packsonly`` is True, stores creating only loose objects are skipped.
-    """
+    """If ``packsonly`` is True, stores creating only loose objects are skipped."""
    if util.safehasattr(repo, 'shareddatastores'):
        datasource = contentstore.unioncontentstore(*repo.shareddatastores)
        historysource = metadatastore.unionmetadatastore(
            *repo.sharedhistorystores, allowincomplete=True
        )

        packpath = shallowutil.getcachepackpath(
            repo, constants.FILEPACK_CATEGORY

        return value

    def addcreated(self, value):
        self.created.add(value)

class repackentry(object):
-    """Simple class representing a single revision entry in the repackledger.
-    """
+    """Simple class representing a single revision entry in the repackledger."""

    __slots__ = (
        'filename',
        'node',
        'datasource',
        'historysource',
        'datarepacked',
        'historyrepacked',
            raise error.Abort(
                b"no remotefilelog server "
                b"configured - is your .hg/hgrc trusted?"
            )

        return path

    def maybesparsematch(self, *revs, **kwargs):
-        '''
+        """
        A wrapper that allows the remotefilelog to invoke sparsematch() if
        this is a sparse repository, or returns None if this is not a
        sparse repository.
-        '''
+        """
        if revs:
            ret = sparse.matcher(repo, revs=revs)
        else:
            ret = sparse.matcher(repo)
        if ret.always():
            return None
        return ret

            self.fileservice.prefetch(files)
        return super(shallowrepository, self).commitctx(
            ctx, error=error, origctx=origctx
        )

    def backgroundprefetch(
        self, revs, base=None, repack=False, pats=None, opts=None
    ):
-        """Runs prefetch in background with optional repack
-        """
+        """Runs prefetch in background with optional repack"""
        cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch']
        if repack:
            cmd.append(b'--repack')
        if revs:
            cmd += [b'-r', revs]
        # We know this command will find a binary, so don't block
        # on it starting.
        kwargs = {}
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)
templatekeyword = registrar.templatekeyword()
revsetpredicate = registrar.revsetpredicate()

configitem(
-    b'remotenames', b'bookmarks', default=True,
+    b'remotenames',
+    b'bookmarks',
+    default=True,
)
configitem(
-    b'remotenames', b'branches', default=True,
+    b'remotenames',
+    b'branches',
+    default=True,
)
configitem(
-    b'remotenames', b'hoistedpeer', default=b'default',
+    b'remotenames',
+    b'hoistedpeer',
+    default=b'default',
)

class lazyremotenamedict(mutablemapping):
    """
    Read-only dict-like Class to lazily resolve remotename entries

    We are doing that because remotenames startup was slow.
    )
    hg.schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(util, b'hasdriveletter', hasdriveletter)

@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
-    """given a repo path, provide the scheme-expanded path
-    """
+    """given a repo path, provide the scheme-expanded path"""
    repo = hg._peerlookup(url)
    if isinstance(repo, ShortRepository):
        url = repo.resolve(url)
    ui.write(url + b'\n')
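What debugexpandscheme does, in miniature: look the scheme up in the table and splice the remainder of the URL onto the template. A simplified sketch (the real ShortRepository goes through a templater, and the extension ships defaults such as bb = https://bitbucket.org/; the table below is illustrative):

schemes = {b'bb': b'https://bitbucket.org/'}

def expand(url):
    scheme, rest = url.split(b'://', 1)
    if scheme not in schemes:
        return url
    return schemes[scheme] + rest

print(expand(b'bb://user/repo'))  # b'https://bitbucket.org/user/repo'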
testedwith = b'ships-with-hg-core'

@command(
    b'share',
    [
        (b'U', b'noupdate', None, _(b'do not create a working directory')),
        (b'B', b'bookmarks', None, _(b'also share bookmarks')),
-        (b'', b'relative', None, _(b'point to source using a relative path'),),
+        (
+            b'',
+            b'relative',
+            None,
+            _(b'point to source using a relative path'),
+        ),
    ],
    _(b'[-U] [-B] SOURCE [DEST]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    norepo=True,
)
def share(
    ui, source, dest=None, noupdate=False, bookmarks=False, relative=False
):
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'transplant', b'filter', default=None,
+    b'transplant',
+    b'filter',
+    default=None,
)
configitem(
-    b'transplant', b'log', default=None,
+    b'transplant',
+    b'log',
+    default=None,
)

class transplantentry(object):
    def __init__(self, lnode, rnode):
        self.lnode = lnode
        self.rnode = rnode

            editform = cmdutil.mergeeditform(repo[None], b'transplant')
            return cmdutil.getcommiteditor(
                editform=editform, **pycompat.strkwargs(opts)
            )

        self.getcommiteditor = getcommiteditor

    def applied(self, repo, node, parent):
-        '''returns True if a node is already an ancestor of parent
-        or is parent or has already been transplanted'''
+        """returns True if a node is already an ancestor of parent
+        or is parent or has already been transplanted"""
        if hasnode(repo, parent):
            parentrev = repo.changelog.rev(parent)
            if hasnode(repo, node):
                rev = repo.changelog.rev(node)
                reachable = repo.changelog.ancestors(
                    [parentrev], rev, inclusive=True
                )
                if rev in reachable:

    ],
    _(
        b'hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
        b'[-m REV] [REV]...'
    ),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def transplant(ui, repo, *revs, **opts):
-    '''transplant changesets from another branch
+    """transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. The changesets
    are copied and will thus appear twice in the history with different
    identities.

    Consider using the graft command if everything is inside the same
    repository - it will use merges and will usually give a better result.

    proper parent changeset by calling :hg:`transplant --parent`.

    If no merges or revisions are provided, :hg:`transplant` will
    start an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand
    and then resume where you left off by calling :hg:`transplant
    --continue/-c`.
-    '''
+    """
    with repo.wlock():
        return _dotransplant(ui, repo, *revs, **opts)

def _dotransplant(ui, repo, *revs, **opts):
    def incwalk(repo, csets, match=util.always):
        for node in csets:
            if match(node):
                yield node

    def transplantwalk(repo, dest, heads, match=util.always):
-        '''Yield all nodes that are ancestors of a head but not ancestors
+        """Yield all nodes that are ancestors of a head but not ancestors
        of dest.
-        If no heads are specified, the heads of repo will be used.'''
+        If no heads are specified, the heads of repo will be used."""
        if not heads:
            heads = repo.heads()
        ancestors = []
        ctx = repo[dest]
        for head in heads:
            ancestors.append(ctx.ancestor(repo[head]).node())
        for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
            if match(node):

        return tp.resume(repo, repo, {})

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate(b'transplanted([set])')
def revsettransplanted(repo, subset, x):
-    """Transplanted changesets in set, or all transplanted changesets.
-    """
+    """Transplanted changesets in set, or all transplanted changesets."""
    if x:
        s = revset.getset(repo, subset, x)
    else:
        s = subset
    return smartset.baseset(
        [r for r in s if repo[r].extra().get(b'transplant_source')]
    )
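A usage sketch for the predicate above: transplanted() composes with any other revset, and each matching changeset carries the original binary node in its extras. Hedged sketch, assuming a repo object as in the surrounding code:

# List transplanted changesets with the source node they came from.
for r in repo.revs(b'transplanted()'):
    src = repo[r].extra()[b'transplant_source']
    print(r, src.hex())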
cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'experimental', b'uncommitondirtywdir', default=False,
+    b'experimental',
+    b'uncommitondirtywdir',
+    default=False,
)
configitem(
-    b'experimental', b'uncommit.keep', default=False,
+    b'experimental',
+    b'uncommit.keep',
+    default=False,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Encoding.encoding may be updated by --encoding option.
# Use a lambda do delay the resolution.
configitem(
-    b'win32mbcs', b'encoding', default=lambda: encoding.encoding,
+    b'win32mbcs',
+    b'encoding',
+    default=lambda: encoding.encoding,
)

_encoding = None  # see extsetup

def decode(arg):
    if isinstance(arg, bytes):
        uarg = arg.decode(_encoding)
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'win32text', b'warn', default=True,
+    b'win32text',
+    b'warn',
+    default=True,
)

# regexp for single LF without CR preceding.
re_single_lf = re.compile(b'(^|[^\r])\n', re.MULTILINE)

newlinestr = {b'\r\n': b'CRLF', b'\r': b'CR'}
filterstr = {b'\r\n': b'clever', b'\r': b'mac'}

        checkers.append((func, level))
        func.match = match
        return func

    return decorator

def match(checker, pe):
-    """Examine whether POEntry "pe" is target of specified checker or not
-    """
+    """Examine whether POEntry "pe" is target of specified checker or not"""
    if not checker.match(pe.msgid):
        return
    # examine suppression by translator comment
    nochecker = 'no-%s-check' % checker.__name__
    for tc in pe.tcomment.split():
        if nochecker == tc:
            return
    return True
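The decorator-factory fragment above registers each checker with a severity level and attaches a matcher for the msgids it cares about. A hedged, self-contained sketch of the same registration pattern (all names below are illustrative, not taken from the source):

import re

checkers = []

def levelchecker(level, msgidpat):  # illustrative name and signature
    def decorator(func):
        func.match = re.compile(msgidpat).search
        checkers.append((func, level))
        return func
    return decorator

@levelchecker('warning', r'%s')
def placeholdercheck(pe):
    # Yield a complaint when a translation drops a %s placeholder.
    if pe.msgid.count('%s') != pe.msgstr.count('%s'):
        yield 'number of %s placeholders differs'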
gca = commonancestorsheads(pfunc, *orignodes) | gca = commonancestorsheads(pfunc, *orignodes) | ||||
if len(gca) <= 1: | if len(gca) <= 1: | ||||
return gca | return gca | ||||
return deepest(gca) | return deepest(gca) | ||||
class incrementalmissingancestors(object): | class incrementalmissingancestors(object): | ||||
'''persistent state used to calculate missing ancestors incrementally | """persistent state used to calculate missing ancestors incrementally | ||||
Although similar in spirit to lazyancestors below, this is a separate class | Although similar in spirit to lazyancestors below, this is a separate class | ||||
because trying to support contains and missingancestors operations with the | because trying to support contains and missingancestors operations with the | ||||
same internal data structures adds needless complexity.''' | same internal data structures adds needless complexity.""" | ||||
def __init__(self, pfunc, bases): | def __init__(self, pfunc, bases): | ||||
self.bases = set(bases) | self.bases = set(bases) | ||||
if not self.bases: | if not self.bases: | ||||
self.bases.add(nullrev) | self.bases.add(nullrev) | ||||
self.pfunc = pfunc | self.pfunc = pfunc | ||||
def hasbases(self): | def hasbases(self): | ||||
continue | continue | ||||
revs.discard(curr) | revs.discard(curr) | ||||
bases.update(pfunc(curr)) | bases.update(pfunc(curr)) | ||||
if len(revs) == keepcount: | if len(revs) == keepcount: | ||||
# no more potential revs to discard | # no more potential revs to discard | ||||
break | break | ||||
def missingancestors(self, revs): | def missingancestors(self, revs): | ||||
'''return all the ancestors of revs that are not ancestors of self.bases | """return all the ancestors of revs that are not ancestors of self.bases | ||||
This may include elements from revs. | This may include elements from revs. | ||||
Equivalent to the revset (::revs - ::self.bases). Revs are returned in | Equivalent to the revset (::revs - ::self.bases). Revs are returned in | ||||
revision number order, which is a topological order.''' | revision number order, which is a topological order.""" | ||||
revsvisit = set(revs) | revsvisit = set(revs) | ||||
basesvisit = self.bases | basesvisit = self.bases | ||||
pfunc = self.pfunc | pfunc = self.pfunc | ||||
bothvisit = revsvisit.intersection(basesvisit) | bothvisit = revsvisit.intersection(basesvisit) | ||||
revsvisit.difference_update(bothvisit) | revsvisit.difference_update(bothvisit) | ||||
if not revsvisit: | if not revsvisit: | ||||
return [] | return [] | ||||
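For readers new to this file: the revset identity (::revs - ::self.bases) is the whole contract. A non-incremental toy version over a dict-backed DAG (my sketch, not the class above) makes that concrete:

    def ancestors(pfunc, heads):
        seen, stack = set(), list(heads)
        while stack:
            r = stack.pop()
            if r != -1 and r not in seen:   # -1 stands in for nullrev
                seen.add(r)
                stack.extend(pfunc(r))
        return seen

    def missingancestors(pfunc, bases, revs):
        # equivalent to the revset (::revs - ::bases); sorting by revision
        # number gives a topological order, as the docstring promises
        return sorted(ancestors(pfunc, revs) - ancestors(pfunc, bases))

    parents = {0: [-1], 1: [0], 2: [0], 3: [1, 2]}
    assert missingancestors(parents.__getitem__, bases=[1], revs=[3]) == [2, 3]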
stringio = util.stringio | stringio = util.stringio | ||||
# from unzip source code: | # from unzip source code: | ||||
_UNX_IFREG = 0x8000 | _UNX_IFREG = 0x8000 | ||||
_UNX_IFLNK = 0xA000 | _UNX_IFLNK = 0xA000 | ||||
def tidyprefix(dest, kind, prefix): | def tidyprefix(dest, kind, prefix): | ||||
'''choose prefix to use for names in archive. make sure prefix is | """choose prefix to use for names in archive. make sure prefix is | ||||
safe for consumers.''' | safe for consumers.""" | ||||
if prefix: | if prefix: | ||||
prefix = util.normpath(prefix) | prefix = util.normpath(prefix) | ||||
else: | else: | ||||
if not isinstance(dest, bytes): | if not isinstance(dest, bytes): | ||||
raise ValueError(b'dest must be string if no prefix') | raise ValueError(b'dest must be string if no prefix') | ||||
prefix = os.path.basename(dest) | prefix = os.path.basename(dest) | ||||
lower = prefix.lower() | lower = prefix.lower() | ||||
dirty = b'+' | dirty = b'+' | ||||
fm.data(dirty=dirty) | fm.data(dirty=dirty) | ||||
fm.end() | fm.end() | ||||
return out.getvalue() | return out.getvalue() | ||||
class tarit(object): | class tarit(object): | ||||
'''write archive to tar file or stream. can write uncompressed, | """write archive to tar file or stream. can write uncompressed, | ||||
or compress with gzip or bzip2.''' | or compress with gzip or bzip2.""" | ||||
if pycompat.ispy3: | if pycompat.ispy3: | ||||
GzipFileWithTime = gzip.GzipFile # camelcase-required | GzipFileWithTime = gzip.GzipFile # camelcase-required | ||||
else: | else: | ||||
class GzipFileWithTime(gzip.GzipFile): | class GzipFileWithTime(gzip.GzipFile): | ||||
def __init__(self, *args, **kw): | def __init__(self, *args, **kw): | ||||
timestamp = None | timestamp = None | ||||
gzfileobj = self.GzipFileWithTime( | gzfileobj = self.GzipFileWithTime( | ||||
name, | name, | ||||
pycompat.sysstr(mode + b'b'), | pycompat.sysstr(mode + b'b'), | ||||
zlib.Z_BEST_COMPRESSION, | zlib.Z_BEST_COMPRESSION, | ||||
fileobj, | fileobj, | ||||
mtime=mtime, | mtime=mtime, | ||||
) | ) | ||||
self.fileobj = gzfileobj | self.fileobj = gzfileobj | ||||
return tarfile.TarFile.taropen( # pytype: disable=attribute-error | return ( | ||||
tarfile.TarFile.taropen( # pytype: disable=attribute-error | |||||
name, pycompat.sysstr(mode), gzfileobj | name, pycompat.sysstr(mode), gzfileobj | ||||
) | ) | ||||
) | |||||
else: | else: | ||||
try: | try: | ||||
return tarfile.open( | return tarfile.open( | ||||
name, pycompat.sysstr(mode + kind), fileobj | name, pycompat.sysstr(mode + kind), fileobj | ||||
) | ) | ||||
except tarfile.CompressionError as e: | except tarfile.CompressionError as e: | ||||
raise error.Abort(pycompat.bytestr(e)) | raise error.Abort(pycompat.bytestr(e)) | ||||
def done(self): | def done(self): | ||||
self.z.close() | self.z.close() | ||||
if self.fileobj: | if self.fileobj: | ||||
self.fileobj.close() | self.fileobj.close() | ||||
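The GzipFileWithTime/taropen dance above exists so the gzip header's mtime is controlled rather than "now". The same trick in isolation, pure stdlib (a sketch, nothing here is tree code):

    import gzip, io, tarfile

    buf = io.BytesIO()
    # pin the gzip header mtime so identical content archives identically
    gz = gzip.GzipFile('archive.tar', 'wb', 9, buf, mtime=0)
    tf = tarfile.TarFile.taropen('archive.tar', 'w', gz)

    data = b'hello\n'
    info = tarfile.TarInfo('hello.txt')   # TarInfo mtime defaults to 0
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))

    tf.close()
    gz.close()
    # buf.getvalue() is now a byte-for-byte reproducible .tar.gz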
class zipit(object): | class zipit(object): | ||||
'''write archive to zip file or stream. can write uncompressed, | """write archive to zip file or stream. can write uncompressed, | ||||
or compressed with deflate.''' | or compressed with deflate.""" | ||||
def __init__(self, dest, mtime, compress=True): | def __init__(self, dest, mtime, compress=True): | ||||
if isinstance(dest, bytes): | if isinstance(dest, bytes): | ||||
dest = pycompat.fsdecode(dest) | dest = pycompat.fsdecode(dest) | ||||
self.z = zipfile.ZipFile( | self.z = zipfile.ZipFile( | ||||
dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED | dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED | ||||
) | ) | ||||
node, | node, | ||||
kind, | kind, | ||||
decode=True, | decode=True, | ||||
match=None, | match=None, | ||||
prefix=b'', | prefix=b'', | ||||
mtime=None, | mtime=None, | ||||
subrepos=False, | subrepos=False, | ||||
): | ): | ||||
'''create archive of repo as it was at node. | """create archive of repo as it was at node. | ||||
dest can be name of directory, name of archive file, or file | dest can be name of directory, name of archive file, or file | ||||
object to write archive to. | object to write archive to. | ||||
kind is type of archive to create. | kind is type of archive to create. | ||||
decode tells whether to put files through decode filters from | decode tells whether to put files through decode filters from | ||||
hgrc. | hgrc. | ||||
match is a matcher to filter names of files to write to archive. | match is a matcher to filter names of files to write to archive. | ||||
prefix is name of path to put before every archive member. | prefix is name of path to put before every archive member. | ||||
mtime is the modified time, in seconds, or None to use the changeset time. | mtime is the modified time, in seconds, or None to use the changeset time. | ||||
subrepos tells whether to include subrepos. | subrepos tells whether to include subrepos. | ||||
''' | """ | ||||
if kind == b'txz' and not pycompat.ispy3: | if kind == b'txz' and not pycompat.ispy3: | ||||
raise error.Abort(_(b'xz compression is only available in Python 3')) | raise error.Abort(_(b'xz compression is only available in Python 3')) | ||||
if kind == b'files': | if kind == b'files': | ||||
if prefix: | if prefix: | ||||
raise error.Abort(_(b'cannot give prefix when archiving to files')) | raise error.Abort(_(b'cannot give prefix when archiving to files')) | ||||
else: | else: |
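Since the archive() docstring above is the only API documentation this function gets, a hedged usage sketch may help (the path is a placeholder, and the parameter order is inferred from the docstring and signature in this hunk; in-process use of Mercurial internals is unsupported as always):

    from mercurial import archival, hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')   # placeholder path
    archival.archive(
        repo,
        b'/tmp/snapshot.tar.gz',    # dest: archive file to write
        repo[b'tip'].node(),        # node: revision to archive
        b'tgz',                     # kind: gzip-compressed tarball
        prefix=b'snapshot/',        # directory prefix for every member
    )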
else: | else: | ||||
nrefs.remove(mark) | nrefs.remove(mark) | ||||
def names(self, node): | def names(self, node): | ||||
"""Return a sorted list of bookmarks pointing to the specified node""" | """Return a sorted list of bookmarks pointing to the specified node""" | ||||
return self._nodemap.get(node, []) | return self._nodemap.get(node, []) | ||||
def applychanges(self, repo, tr, changes): | def applychanges(self, repo, tr, changes): | ||||
"""Apply a list of changes to bookmarks | """Apply a list of changes to bookmarks""" | ||||
""" | |||||
bmchanges = tr.changes.get(b'bookmarks') | bmchanges = tr.changes.get(b'bookmarks') | ||||
for name, node in changes: | for name, node in changes: | ||||
old = self._refmap.get(name) | old = self._refmap.get(name) | ||||
if node is None: | if node is None: | ||||
self._del(name) | self._del(name) | ||||
else: | else: | ||||
self._set(name, node) | self._set(name, node) | ||||
if bmchanges is not None: | if bmchanges is not None: | ||||
heads = [] | heads = [] | ||||
for mark, n in pycompat.iteritems(repo._bookmarks): | for mark, n in pycompat.iteritems(repo._bookmarks): | ||||
if mark.split(b'@', 1)[0] == name: | if mark.split(b'@', 1)[0] == name: | ||||
heads.append(n) | heads.append(n) | ||||
return heads | return heads | ||||
def calculateupdate(ui, repo): | def calculateupdate(ui, repo): | ||||
'''Return a tuple (activemark, movemarkfrom) indicating the active bookmark | """Return a tuple (activemark, movemarkfrom) indicating the active bookmark | ||||
and where to move the active bookmark from, if needed.''' | and where to move the active bookmark from, if needed.""" | ||||
checkout, movemarkfrom = None, None | checkout, movemarkfrom = None, None | ||||
activemark = repo._activebookmark | activemark = repo._activebookmark | ||||
if isactivewdirparent(repo): | if isactivewdirparent(repo): | ||||
movemarkfrom = repo[b'.'].node() | movemarkfrom = repo[b'.'].node() | ||||
elif activemark: | elif activemark: | ||||
ui.status(_(b"updating to active bookmark %s\n") % activemark) | ui.status(_(b"updating to active bookmark %s\n") % activemark) | ||||
checkout = activemark | checkout = activemark | ||||
return (checkout, movemarkfrom) | return (checkout, movemarkfrom) | ||||
if new not in repo: | if new not in repo: | ||||
return False | return False | ||||
changes = [(key, repo[new].node())] | changes = [(key, repo[new].node())] | ||||
marks.applychanges(repo, tr, changes) | marks.applychanges(repo, tr, changes) | ||||
return True | return True | ||||
def comparebookmarks(repo, srcmarks, dstmarks, targets=None): | def comparebookmarks(repo, srcmarks, dstmarks, targets=None): | ||||
'''Compare bookmarks between srcmarks and dstmarks | """Compare bookmarks between srcmarks and dstmarks | ||||
This returns tuple "(addsrc, adddst, advsrc, advdst, diverge, | This returns tuple "(addsrc, adddst, advsrc, advdst, diverge, | ||||
differ, invalid)", each are list of bookmarks below: | differ, invalid)", each are list of bookmarks below: | ||||
:addsrc: added on src side (removed on dst side, perhaps) | :addsrc: added on src side (removed on dst side, perhaps) | ||||
:adddst: added on dst side (removed on src side, perhaps) | :adddst: added on dst side (removed on src side, perhaps) | ||||
:advsrc: advanced on src side | :advsrc: advanced on src side | ||||
:advdst: advanced on dst side | :advdst: advanced on dst side | ||||
:diverge: diverge | :diverge: diverge | ||||
:differ: changed, but changeset referred on src is unknown on dst | :differ: changed, but changeset referred on src is unknown on dst | ||||
:invalid: unknown on both sides | :invalid: unknown on both sides | ||||
:same: same on both sides | :same: same on both sides | ||||
Each element of the lists in the result tuple is a tuple "(bookmark name, | Each element of the lists in the result tuple is a tuple "(bookmark name, | ||||
changeset ID on source side, changeset ID on destination | changeset ID on source side, changeset ID on destination | ||||
side)". Each changeset ID is a binary node or None. | side)". Each changeset ID is a binary node or None. | ||||
Changeset IDs of tuples in "addsrc", "adddst", "differ" or | Changeset IDs of tuples in "addsrc", "adddst", "differ" or | ||||
"invalid" list may be unknown for repo. | "invalid" list may be unknown for repo. | ||||
If "targets" is specified, only bookmarks listed in it are | If "targets" is specified, only bookmarks listed in it are | ||||
examined. | examined. | ||||
''' | """ | ||||
if targets: | if targets: | ||||
bset = set(targets) | bset = set(targets) | ||||
else: | else: | ||||
srcmarkset = set(srcmarks) | srcmarkset = set(srcmarks) | ||||
dstmarkset = set(dstmarks) | dstmarkset = set(dstmarks) | ||||
bset = srcmarkset | dstmarkset | bset = srcmarkset | dstmarkset | ||||
else: | else: | ||||
# it is too expensive to examine in detail, in this case | # it is too expensive to examine in detail, in this case | ||||
differ((b, scid, dcid)) | differ((b, scid, dcid)) | ||||
return results | return results | ||||
def _diverge(ui, b, path, localmarks, remotenode): | def _diverge(ui, b, path, localmarks, remotenode): | ||||
'''Return appropriate diverged bookmark for specified ``path`` | """Return appropriate diverged bookmark for specified ``path`` | ||||
This returns None if it fails to assign any divergent | This returns None if it fails to assign any divergent | ||||
bookmark name. | bookmark name. | ||||
This reuses an already existing one with the "@number" suffix, if it | This reuses an already existing one with the "@number" suffix, if it | ||||
refers to ``remotenode``. | refers to ``remotenode``. | ||||
''' | """ | ||||
if b == b'@': | if b == b'@': | ||||
b = b'' | b = b'' | ||||
# try to use an @pathalias suffix | # try to use an @pathalias suffix | ||||
# if an @pathalias already exists, we overwrite (update) it | # if an @pathalias already exists, we overwrite (update) it | ||||
if path.startswith(b"file:"): | if path.startswith(b"file:"): | ||||
path = util.url(path).path | path = util.url(path).path | ||||
for p, u in ui.configitems(b"paths"): | for p, u in ui.configitems(b"paths"): | ||||
if u.startswith(b"file:"): | if u.startswith(b"file:"): | ||||
key = lambda t: (t[0], t[1] or b'') | key = lambda t: (t[0], t[1] or b'') | ||||
for b, node, writer, msg in sorted(changed, key=key): | for b, node, writer, msg in sorted(changed, key=key): | ||||
changes.append((b, node)) | changes.append((b, node)) | ||||
writer(msg) | writer(msg) | ||||
localmarks.applychanges(repo, tr, changes) | localmarks.applychanges(repo, tr, changes) | ||||
def incoming(ui, repo, peer): | def incoming(ui, repo, peer): | ||||
'''Show bookmarks incoming from other to repo | """Show bookmarks incoming from other to repo""" | ||||
''' | |||||
ui.status(_(b"searching for changed bookmarks\n")) | ui.status(_(b"searching for changed bookmarks\n")) | ||||
with peer.commandexecutor() as e: | with peer.commandexecutor() as e: | ||||
remotemarks = unhexlifybookmarks( | remotemarks = unhexlifybookmarks( | ||||
e.callcommand(b'listkeys', {b'namespace': b'bookmarks',}).result() | e.callcommand( | ||||
b'listkeys', | |||||
{ | |||||
b'namespace': b'bookmarks', | |||||
}, | |||||
).result() | |||||
) | ) | ||||
r = comparebookmarks(repo, remotemarks, repo._bookmarks) | r = comparebookmarks(repo, remotemarks, repo._bookmarks) | ||||
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | ||||
incomings = [] | incomings = [] | ||||
if ui.debugflag: | if ui.debugflag: | ||||
getid = lambda id: id | getid = lambda id: id | ||||
for s in sorted(incomings): | for s in sorted(incomings): | ||||
ui.write(s) | ui.write(s) | ||||
return 0 | return 0 | ||||
def outgoing(ui, repo, other): | def outgoing(ui, repo, other): | ||||
'''Show bookmarks outgoing from repo to other | """Show bookmarks outgoing from repo to other""" | ||||
''' | |||||
ui.status(_(b"searching for changed bookmarks\n")) | ui.status(_(b"searching for changed bookmarks\n")) | ||||
remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks')) | remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks')) | ||||
r = comparebookmarks(repo, repo._bookmarks, remotemarks) | r = comparebookmarks(repo, repo._bookmarks, remotemarks) | ||||
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | ||||
outgoings = [] | outgoings = [] | ||||
if ui.debugflag: | if ui.debugflag: | ||||
for s in sorted(outgoings): | for s in sorted(outgoings): | ||||
ui.write(s) | ui.write(s) | ||||
return 0 | return 0 | ||||
def summary(repo, peer): | def summary(repo, peer): | ||||
'''Compare bookmarks between repo and other for "hg summary" output | """Compare bookmarks between repo and other for "hg summary" output | ||||
This returns "(# of incoming, # of outgoing)" tuple. | This returns "(# of incoming, # of outgoing)" tuple. | ||||
''' | """ | ||||
with peer.commandexecutor() as e: | with peer.commandexecutor() as e: | ||||
remotemarks = unhexlifybookmarks( | remotemarks = unhexlifybookmarks( | ||||
e.callcommand(b'listkeys', {b'namespace': b'bookmarks',}).result() | e.callcommand( | ||||
b'listkeys', | |||||
{ | |||||
b'namespace': b'bookmarks', | |||||
}, | |||||
).result() | |||||
) | ) | ||||
r = comparebookmarks(repo, remotemarks, repo._bookmarks) | r = comparebookmarks(repo, remotemarks, repo._bookmarks) | ||||
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r | ||||
return (len(addsrc), len(adddst)) | return (len(addsrc), len(adddst)) | ||||
def validdest(repo, old, new): | def validdest(repo, old, new): |
List, | List, | ||||
Optional, | Optional, | ||||
Set, | Set, | ||||
Tuple, | Tuple, | ||||
Union, | Union, | ||||
) | ) | ||||
assert any( | assert any( | ||||
(Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,) | ( | ||||
Any, | |||||
Callable, | |||||
Dict, | |||||
Iterable, | |||||
List, | |||||
Optional, | |||||
Set, | |||||
Tuple, | |||||
Union, | |||||
) | |||||
) | ) | ||||
subsettable = repoviewutil.subsettable | subsettable = repoviewutil.subsettable | ||||
calcsize = struct.calcsize | calcsize = struct.calcsize | ||||
pack_into = struct.pack_into | pack_into = struct.pack_into | ||||
unpack_from = struct.unpack_from | unpack_from = struct.unpack_from | ||||
cache.write(rview) | cache.write(rview) | ||||
return | return | ||||
def clear(self): | def clear(self): | ||||
self._per_filter.clear() | self._per_filter.clear() | ||||
def _unknownnode(node): | def _unknownnode(node): | ||||
""" raises ValueError when branchcache found a node which does not exists | """raises ValueError when branchcache found a node which does not exists""" | ||||
""" | |||||
raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node))) | raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node))) | ||||
def _branchcachedesc(repo): | def _branchcachedesc(repo): | ||||
if repo.filtername is not None: | if repo.filtername is not None: | ||||
return b'branch cache (%s)' % repo.filtername | return b'branch cache (%s)' % repo.filtername | ||||
else: | else: | ||||
return b'branch cache' | return b'branch cache' | ||||
entries=(), | entries=(), | ||||
tipnode=nullid, | tipnode=nullid, | ||||
tiprev=nullrev, | tiprev=nullrev, | ||||
filteredhash=None, | filteredhash=None, | ||||
closednodes=None, | closednodes=None, | ||||
hasnode=None, | hasnode=None, | ||||
): | ): | ||||
# type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None | # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None | ||||
""" hasnode is a function which can be used to verify whether changelog | """hasnode is a function which can be used to verify whether changelog | ||||
has a given node or not. If it's not provided, we assume that every node | has a given node or not. If it's not provided, we assume that every node | ||||
we have exists in changelog """ | we have exists in changelog""" | ||||
self.tipnode = tipnode | self.tipnode = tipnode | ||||
self.tiprev = tiprev | self.tiprev = tiprev | ||||
self.filteredhash = filteredhash | self.filteredhash = filteredhash | ||||
# closednodes is a set of nodes that close their branch. If the branch | # closednodes is a set of nodes that close their branch. If the branch | ||||
# cache has been updated, it may contain nodes that are no longer | # cache has been updated, it may contain nodes that are no longer | ||||
# heads. | # heads. | ||||
if closednodes is None: | if closednodes is None: | ||||
self._closednodes = set() | self._closednodes = set() | ||||
finally: | finally: | ||||
if f: | if f: | ||||
f.close() | f.close() | ||||
return bcache | return bcache | ||||
def load(self, repo, lineiter): | def load(self, repo, lineiter): | ||||
""" fully loads the branchcache by reading from the file using the line | """fully loads the branchcache by reading from the file using the line | ||||
iterator passed""" | iterator passed""" | ||||
for line in lineiter: | for line in lineiter: | ||||
line = line.rstrip(b'\n') | line = line.rstrip(b'\n') | ||||
if not line: | if not line: | ||||
continue | continue | ||||
node, state, label = line.split(b" ", 2) | node, state, label = line.split(b" ", 2) | ||||
if state not in b'oc': | if state not in b'oc': | ||||
raise ValueError('invalid branch state') | raise ValueError('invalid branch state') | ||||
try: | try: | ||||
return (self.tipnode == repo.changelog.node(self.tiprev)) and ( | return (self.tipnode == repo.changelog.node(self.tiprev)) and ( | ||||
self.filteredhash == scmutil.filteredhash(repo, self.tiprev) | self.filteredhash == scmutil.filteredhash(repo, self.tiprev) | ||||
) | ) | ||||
except IndexError: | except IndexError: | ||||
return False | return False | ||||
def _branchtip(self, heads): | def _branchtip(self, heads): | ||||
'''Return a tuple with the last open head in heads and False; | """Return a tuple with the last open head in heads and False; | ||||
otherwise return the last closed head and True.''' | otherwise return the last closed head and True.""" | ||||
tip = heads[-1] | tip = heads[-1] | ||||
closed = True | closed = True | ||||
for h in reversed(heads): | for h in reversed(heads): | ||||
if h not in self._closednodes: | if h not in self._closednodes: | ||||
tip = h | tip = h | ||||
closed = False | closed = False | ||||
break | break | ||||
return tip, closed | return tip, closed | ||||
def branchtip(self, branch): | def branchtip(self, branch): | ||||
'''Return the tipmost open head on branch head, otherwise return the | """Return the tipmost open head on branch head, otherwise return the | ||||
tipmost closed head on branch. | tipmost closed head on branch. | ||||
Raise KeyError for unknown branch.''' | Raise KeyError for unknown branch.""" | ||||
return self._branchtip(self[branch])[0] | return self._branchtip(self[branch])[0] | ||||
def iteropen(self, nodes): | def iteropen(self, nodes): | ||||
return (n for n in nodes if n not in self._closednodes) | return (n for n in nodes if n not in self._closednodes) | ||||
def branchheads(self, branch, closed=False): | def branchheads(self, branch, closed=False): | ||||
self._verifybranch(branch) | self._verifybranch(branch) | ||||
heads = self._entries[branch] | heads = self._entries[branch] |
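The open-vs-closed head scan in _branchtip above is subtle enough to deserve a standalone probe (toy data, mine, not tree code):

    def branchtip(heads, closed_nodes):
        # heads are in topological order; prefer the last open one
        tip, closed = heads[-1], True
        for h in reversed(heads):
            if h not in closed_nodes:
                tip, closed = h, False
                break
        return tip, closed

    assert branchtip([b'a', b'b', b'c'], {b'c'}) == (b'b', False)
    assert branchtip([b'a'], {b'a'}) == (b'a', True)   # all heads closed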
def processparts(repo, op, unbundler): | def processparts(repo, op, unbundler): | ||||
with partiterator(repo, op, unbundler) as parts: | with partiterator(repo, op, unbundler) as parts: | ||||
for part in parts: | for part in parts: | ||||
_processpart(op, part) | _processpart(op, part) | ||||
def _processchangegroup(op, cg, tr, source, url, **kwargs): | def _processchangegroup(op, cg, tr, source, url, **kwargs): | ||||
ret = cg.apply(op.repo, tr, source, url, **kwargs) | ret = cg.apply(op.repo, tr, source, url, **kwargs) | ||||
op.records.add(b'changegroup', {b'return': ret,}) | op.records.add( | ||||
b'changegroup', | |||||
{ | |||||
b'return': ret, | |||||
}, | |||||
) | |||||
return ret | return ret | ||||
def _gethandler(op, part): | def _gethandler(op, part): | ||||
status = b'unknown' # used by debug output | status = b'unknown' # used by debug output | ||||
try: | try: | ||||
handler = parthandlermapping.get(part.type) | handler = parthandlermapping.get(part.type) | ||||
if handler is None: | if handler is None: | ||||
raw = remote.capable(b'bundle2') | raw = remote.capable(b'bundle2') | ||||
if not raw and raw != b'': | if not raw and raw != b'': | ||||
return {} | return {} | ||||
capsblob = urlreq.unquote(remote.capable(b'bundle2')) | capsblob = urlreq.unquote(remote.capable(b'bundle2')) | ||||
return decodecaps(capsblob) | return decodecaps(capsblob) | ||||
def obsmarkersversion(caps): | def obsmarkersversion(caps): | ||||
"""extract the list of supported obsmarkers versions from a bundle2caps dict | """extract the list of supported obsmarkers versions from a bundle2caps dict""" | ||||
""" | |||||
obscaps = caps.get(b'obsmarkers', ()) | obscaps = caps.get(b'obsmarkers', ()) | ||||
return [int(c[1:]) for c in obscaps if c.startswith(b'V')] | return [int(c[1:]) for c in obscaps if c.startswith(b'V')] | ||||
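This one is trivially testable, which makes a nice sanity check of the caps encoding (b'V0', b'V1', ...):

    def obsmarkersversion(caps):
        obscaps = caps.get(b'obsmarkers', ())
        return [int(c[1:]) for c in obscaps if c.startswith(b'V')]

    assert obsmarkersversion({b'obsmarkers': [b'V0', b'V1']}) == [0, 1]
    assert obsmarkersversion({}) == []   # no obsmarkers capability at all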
def writenewbundle( | def writenewbundle( | ||||
ui, | ui, | ||||
repo, | repo, | ||||
source, | source, |
msg = _(b'Unsupported changegroup version: %s') | msg = _(b'Unsupported changegroup version: %s') | ||||
raise error.Abort(msg % version) | raise error.Abort(msg % version) | ||||
if bundle.compressed(): | if bundle.compressed(): | ||||
cgstream = self._writetempbundle(part.read, b'.cg%sun' % version) | cgstream = self._writetempbundle(part.read, b'.cg%sun' % version) | ||||
self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN') | self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN') | ||||
def _writetempbundle(self, readfn, suffix, header=b''): | def _writetempbundle(self, readfn, suffix, header=b''): | ||||
"""Write a temporary file to disk | """Write a temporary file to disk""" | ||||
""" | |||||
fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix) | fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix) | ||||
self.tempfile = temp | self.tempfile = temp | ||||
with os.fdopen(fdtemp, 'wb') as fptemp: | with os.fdopen(fdtemp, 'wb') as fptemp: | ||||
fptemp.write(header) | fptemp.write(header) | ||||
while True: | while True: | ||||
chunk = readfn(2 ** 18) | chunk = readfn(2 ** 18) | ||||
if not chunk: | if not chunk: | ||||
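The loop is cut off in the hunk above; the full idiom is just a chunked copy in 256 KiB (2 ** 18 byte) reads so the bundle never has to fit in memory. A stdlib-only sketch of the same pattern:

    import io, os, tempfile

    def writetemp(readfn, header=b''):
        fd, path = tempfile.mkstemp(prefix='hg-bundle-')
        with os.fdopen(fd, 'wb') as fp:
            fp.write(header)
            while True:
                chunk = readfn(2 ** 18)   # read in 256 KiB chunks
                if not chunk:
                    break
                fp.write(chunk)
        return path

    path = writetemp(io.BytesIO(b'x' * 1000000).read)
    assert os.path.getsize(path) == 1000000
    os.unlink(path)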
def release(self): | def release(self): | ||||
raise NotImplementedError | raise NotImplementedError | ||||
def getremotechanges( | def getremotechanges( | ||||
ui, repo, peer, onlyheads=None, bundlename=None, force=False | ui, repo, peer, onlyheads=None, bundlename=None, force=False | ||||
): | ): | ||||
'''obtains a bundle of changes incoming from peer | """obtains a bundle of changes incoming from peer | ||||
"onlyheads" restricts the returned changes to those reachable from the | "onlyheads" restricts the returned changes to those reachable from the | ||||
specified heads. | specified heads. | ||||
"bundlename", if given, stores the bundle to this file path permanently; | "bundlename", if given, stores the bundle to this file path permanently; | ||||
otherwise it's stored to a temp file and gets deleted again when you call | otherwise it's stored to a temp file and gets deleted again when you call | ||||
the returned "cleanupfn". | the returned "cleanupfn". | ||||
"force" indicates whether to proceed on unrelated repos. | "force" indicates whether to proceed on unrelated repos. | ||||
Returns a tuple (local, csets, cleanupfn): | Returns a tuple (local, csets, cleanupfn): | ||||
"local" is a local repo from which to obtain the actual incoming | "local" is a local repo from which to obtain the actual incoming | ||||
changesets; it is a bundlerepo for the obtained bundle when the | changesets; it is a bundlerepo for the obtained bundle when the | ||||
original "peer" is remote. | original "peer" is remote. | ||||
"csets" lists the incoming changeset node ids. | "csets" lists the incoming changeset node ids. | ||||
"cleanupfn" must be called without arguments when you're done processing | "cleanupfn" must be called without arguments when you're done processing | ||||
the changes; it closes both the original "peer" and the one returned | the changes; it closes both the original "peer" and the one returned | ||||
here. | here. | ||||
''' | """ | ||||
tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force) | tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force) | ||||
common, incoming, rheads = tmp | common, incoming, rheads = tmp | ||||
if not incoming: | if not incoming: | ||||
try: | try: | ||||
if bundlename: | if bundlename: | ||||
os.unlink(bundlename) | os.unlink(bundlename) | ||||
except OSError: | except OSError: | ||||
pass | pass | ||||
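The (local, csets, cleanupfn) contract documented above implies a try/finally at every call site; a hedged sketch of such a caller (the path and URL are placeholders, and again this is in-process use of internals):

    from mercurial import bundlerepo, hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/local/repo')       # placeholder
    peer = hg.peer(ui, {}, b'https://example.com/repo')    # placeholder

    local, csets, cleanup = bundlerepo.getremotechanges(ui, repo, peer)
    try:
        for node in csets:                  # incoming changeset node ids
            ui.write(local[node].description() + b'\n')
    finally:
        cleanup()   # closes both peers and removes any temp bundle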
}, | }, | ||||
).result() | ).result() | ||||
elif onlyheads is None and not peer.capable(b'changegroupsubset'): | elif onlyheads is None and not peer.capable(b'changegroupsubset'): | ||||
# compat with older servers when pulling all remote heads | # compat with older servers when pulling all remote heads | ||||
with peer.commandexecutor() as e: | with peer.commandexecutor() as e: | ||||
cg = e.callcommand( | cg = e.callcommand( | ||||
b'changegroup', | b'changegroup', | ||||
{b'nodes': incoming, b'source': b'incoming',}, | { | ||||
b'nodes': incoming, | |||||
b'source': b'incoming', | |||||
}, | |||||
).result() | ).result() | ||||
rheads = None | rheads = None | ||||
else: | else: | ||||
with peer.commandexecutor() as e: | with peer.commandexecutor() as e: | ||||
cg = e.callcommand( | cg = e.callcommand( | ||||
b'changegroupsubset', | b'changegroupsubset', | ||||
{ | { | ||||
csets = localrepo.changelog.findmissing(common, rheads) | csets = localrepo.changelog.findmissing(common, rheads) | ||||
if bundlerepo: | if bundlerepo: | ||||
reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]] | reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]] | ||||
with peer.commandexecutor() as e: | with peer.commandexecutor() as e: | ||||
remotephases = e.callcommand( | remotephases = e.callcommand( | ||||
b'listkeys', {b'namespace': b'phases',} | b'listkeys', | ||||
{ | |||||
b'namespace': b'phases', | |||||
}, | |||||
).result() | ).result() | ||||
pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes) | pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes) | ||||
pullop.trmanager = bundletransactionmanager() | pullop.trmanager = bundletransactionmanager() | ||||
exchange._pullapplyphases(pullop, remotephases) | exchange._pullapplyphases(pullop, remotephases) | ||||
def cleanup(): | def cleanup(): | ||||
if bundlerepo: | if bundlerepo: | ||||
bundlerepo.close() | bundlerepo.close() | ||||
if bundle: | if bundle: | ||||
os.unlink(bundle) | os.unlink(bundle) | ||||
peer.close() | peer.close() | ||||
return (localrepo, csets, cleanup) | return (localrepo, csets, cleanup) |
def stripdesc(desc): | def stripdesc(desc): | ||||
"""strip trailing whitespace and leading and trailing empty lines""" | """strip trailing whitespace and leading and trailing empty lines""" | ||||
return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n') | return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n') | ||||
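stripdesc is a one-liner but its exact semantics (inner blank lines survive, outer ones don't, leading indentation is kept) are easy to get wrong; a quick probe:

    def stripdesc(desc):
        return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')

    assert stripdesc(b'\n\n  summary  \n\nbody \n\n') == b'  summary\n\nbody'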
class appender(object): | class appender(object): | ||||
'''the changelog index must be updated last on disk, so we use this class | """the changelog index must be updated last on disk, so we use this class | ||||
to delay writes to it''' | to delay writes to it""" | ||||
def __init__(self, vfs, name, mode, buf): | def __init__(self, vfs, name, mode, buf): | ||||
self.data = buf | self.data = buf | ||||
fp = vfs(name, mode) | fp = vfs(name, mode) | ||||
self.fp = fp | self.fp = fp | ||||
self.offset = fp.tell() | self.offset = fp.tell() | ||||
self.size = vfs.fstat(fp).st_size | self.size = vfs.fstat(fp).st_size | ||||
self._end = self.size | self._end = self.size |
except crecordmod.fallbackerror as e: | except crecordmod.fallbackerror as e: | ||||
ui.warn(b'%s\n' % e) | ui.warn(b'%s\n' % e) | ||||
ui.warn(_(b'falling back to text mode\n')) | ui.warn(_(b'falling back to text mode\n')) | ||||
return patch.filterpatch(ui, originalhunks, match, operation) | return patch.filterpatch(ui, originalhunks, match, operation) | ||||
def recordfilter(ui, originalhunks, match, operation=None): | def recordfilter(ui, originalhunks, match, operation=None): | ||||
""" Prompts the user to filter the originalhunks and return a list of | """Prompts the user to filter the originalhunks and return a list of | ||||
selected hunks. | selected hunks. | ||||
*operation* is used to build ui messages to indicate to the user what | *operation* is used to build ui messages to indicate to the user what | ||||
kind of filtering they are doing: reverting, committing, shelving, etc. | kind of filtering they are doing: reverting, committing, shelving, etc. | ||||
(see patch.filterpatch). | (see patch.filterpatch). | ||||
""" | """ | ||||
usecurses = crecordmod.checkcurses(ui) | usecurses = crecordmod.checkcurses(ui) | ||||
testfile = ui.config(b'experimental', b'crecordtest') | testfile = ui.config(b'experimental', b'crecordtest') | ||||
oldwrite = setupwrapcolorwrite(ui) | oldwrite = setupwrapcolorwrite(ui) | ||||
oldp, p = p, os.path.dirname(p) | oldp, p = p, os.path.dirname(p) | ||||
if p == oldp: | if p == oldp: | ||||
return None | return None | ||||
return p | return p | ||||
def bailifchanged(repo, merge=True, hint=None): | def bailifchanged(repo, merge=True, hint=None): | ||||
""" enforce the precondition that working directory must be clean. | """enforce the precondition that working directory must be clean. | ||||
'merge' can be set to false if a pending uncommitted merge should be | 'merge' can be set to false if a pending uncommitted merge should be | ||||
ignored (such as when 'update --check' runs). | ignored (such as when 'update --check' runs). | ||||
'hint' is the usual hint given to Abort exception. | 'hint' is the usual hint given to Abort exception. | ||||
""" | """ | ||||
if merge and repo.dirstate.p2() != nullid: | if merge and repo.dirstate.p2() != nullid: | ||||
repo, | repo, | ||||
revs, | revs, | ||||
basefm, | basefm, | ||||
fntemplate=b'hg-%h.patch', | fntemplate=b'hg-%h.patch', | ||||
switch_parent=False, | switch_parent=False, | ||||
opts=None, | opts=None, | ||||
match=None, | match=None, | ||||
): | ): | ||||
'''export changesets as hg patches | """export changesets as hg patches | ||||
Args: | Args: | ||||
repo: The repository from which we're exporting revisions. | repo: The repository from which we're exporting revisions. | ||||
revs: A list of revisions to export as revision numbers. | revs: A list of revisions to export as revision numbers. | ||||
basefm: A formatter to which patches should be written. | basefm: A formatter to which patches should be written. | ||||
fntemplate: An optional string to use for generating patch file names. | fntemplate: An optional string to use for generating patch file names. | ||||
switch_parent: If True, show diffs against second parent when not nullid. | switch_parent: If True, show diffs against second parent when not nullid. | ||||
Default is false, which always shows diff against p1. | Default is false, which always shows diff against p1. | ||||
opts: diff options to use for generating the patch. | opts: diff options to use for generating the patch. | ||||
match: If specified, only export changes to files matching this matcher. | match: If specified, only export changes to files matching this matcher. | ||||
Returns: | Returns: | ||||
Nothing. | Nothing. | ||||
Side Effect: | Side Effect: | ||||
"HG Changeset Patch" data is emitted to one of the following | "HG Changeset Patch" data is emitted to one of the following | ||||
destinations: | destinations: | ||||
fntemplate specified: Each rev is written to a unique file named using | fntemplate specified: Each rev is written to a unique file named using | ||||
the given template. | the given template. | ||||
Otherwise: All revs will be written to basefm. | Otherwise: All revs will be written to basefm. | ||||
''' | """ | ||||
_prefetchchangedfiles(repo, revs, match) | _prefetchchangedfiles(repo, revs, match) | ||||
if not fntemplate: | if not fntemplate: | ||||
_exportfile( | _exportfile( | ||||
repo, revs, basefm, b'<unnamed>', switch_parent, opts, match | repo, revs, basefm, b'<unnamed>', switch_parent, opts, match | ||||
) | ) | ||||
else: | else: | ||||
_exportfntemplate( | _exportfntemplate( | ||||
if not opts.get(b'dry_run'): | if not opts.get(b'dry_run'): | ||||
needdata = (b'revert', b'add', b'undelete') | needdata = (b'revert', b'add', b'undelete') | ||||
oplist = [actions[name][0] for name in needdata] | oplist = [actions[name][0] for name in needdata] | ||||
prefetch = scmutil.prefetchfiles | prefetch = scmutil.prefetchfiles | ||||
matchfiles = scmutil.matchfiles( | matchfiles = scmutil.matchfiles( | ||||
repo, [f for sublist in oplist for f in sublist] | repo, [f for sublist in oplist for f in sublist] | ||||
) | ) | ||||
prefetch( | prefetch( | ||||
repo, [(ctx.rev(), matchfiles)], | repo, | ||||
[(ctx.rev(), matchfiles)], | |||||
) | ) | ||||
match = scmutil.match(repo[None], pats) | match = scmutil.match(repo[None], pats) | ||||
_performrevert( | _performrevert( | ||||
repo, | repo, | ||||
ctx, | ctx, | ||||
names, | names, | ||||
uipathfn, | uipathfn, | ||||
actions, | actions, | ||||
# | # | ||||
# otherwise, 'changes' is a tuple of tuples below: | # otherwise, 'changes' is a tuple of tuples below: | ||||
# - (sourceurl, sourcebranch, sourcepeer, incoming) | # - (sourceurl, sourcebranch, sourcepeer, incoming) | ||||
# - (desturl, destbranch, destpeer, outgoing) | # - (desturl, destbranch, destpeer, outgoing) | ||||
summaryremotehooks = util.hooks() | summaryremotehooks = util.hooks() | ||||
def checkunfinished(repo, commit=False, skipmerge=False): | def checkunfinished(repo, commit=False, skipmerge=False): | ||||
'''Look for an unfinished multistep operation, like graft, and abort | """Look for an unfinished multistep operation, like graft, and abort | ||||
if found. It's probably good to check this right before | if found. It's probably good to check this right before | ||||
bailifchanged(). | bailifchanged(). | ||||
''' | """ | ||||
# Check for non-clearable states first, so things like rebase will take | # Check for non-clearable states first, so things like rebase will take | ||||
# precedence over update. | # precedence over update. | ||||
for state in statemod._unfinishedstates: | for state in statemod._unfinishedstates: | ||||
if ( | if ( | ||||
state._clearable | state._clearable | ||||
or (commit and state._allowcommit) | or (commit and state._allowcommit) | ||||
or state._reportonly | or state._reportonly | ||||
): | ): | ||||
or s._reportonly | or s._reportonly | ||||
): | ): | ||||
continue | continue | ||||
if s.isunfinished(repo): | if s.isunfinished(repo): | ||||
raise error.StateError(s.msg(), hint=s.hint()) | raise error.StateError(s.msg(), hint=s.hint()) | ||||
def clearunfinished(repo): | def clearunfinished(repo): | ||||
'''Check for unfinished operations (as above), and clear the ones | """Check for unfinished operations (as above), and clear the ones | ||||
that are clearable. | that are clearable. | ||||
''' | """ | ||||
for state in statemod._unfinishedstates: | for state in statemod._unfinishedstates: | ||||
if state._reportonly: | if state._reportonly: | ||||
continue | continue | ||||
if not state._clearable and state.isunfinished(repo): | if not state._clearable and state.isunfinished(repo): | ||||
raise error.StateError(state.msg(), hint=state.hint()) | raise error.StateError(state.msg(), hint=state.hint()) | ||||
for s in statemod._unfinishedstates: | for s in statemod._unfinishedstates: | ||||
if s._opname == b'merge' or state._reportonly: | if s._opname == b'merge' or state._reportonly: | ||||
continue | continue | ||||
if s._clearable and s.isunfinished(repo): | if s._clearable and s.isunfinished(repo): | ||||
util.unlink(repo.vfs.join(s._fname)) | util.unlink(repo.vfs.join(s._fname)) | ||||
def getunfinishedstate(repo): | def getunfinishedstate(repo): | ||||
''' Checks for unfinished operations and returns a statecheck object | """Checks for unfinished operations and returns a statecheck object | ||||
for it''' | for it""" | ||||
for state in statemod._unfinishedstates: | for state in statemod._unfinishedstates: | ||||
if state.isunfinished(repo): | if state.isunfinished(repo): | ||||
return state | return state | ||||
return None | return None | ||||
def howtocontinue(repo): | def howtocontinue(repo): | ||||
'''Check for an unfinished operation and return the command to finish | """Check for an unfinished operation and return the command to finish | ||||
it. | it. | ||||
statemod._unfinishedstates list is checked for an unfinished operation | statemod._unfinishedstates list is checked for an unfinished operation | ||||
and the corresponding message to finish it is generated if a method to | and the corresponding message to finish it is generated if a method to | ||||
continue is supported by the operation. | continue is supported by the operation. | ||||
Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is | Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is | ||||
a boolean. | a boolean. | ||||
''' | """ | ||||
contmsg = _(b"continue: %s") | contmsg = _(b"continue: %s") | ||||
for state in statemod._unfinishedstates: | for state in statemod._unfinishedstates: | ||||
if not state._continueflag: | if not state._continueflag: | ||||
continue | continue | ||||
if state.isunfinished(repo): | if state.isunfinished(repo): | ||||
return contmsg % state.continuemsg(), True | return contmsg % state.continuemsg(), True | ||||
if repo[None].dirty(missing=True, merge=False, branch=False): | if repo[None].dirty(missing=True, merge=False, branch=False): | ||||
return contmsg % _(b"hg commit"), False | return contmsg % _(b"hg commit"), False | ||||
return None, None | return None, None | ||||
def checkafterresolved(repo): | def checkafterresolved(repo): | ||||
'''Inform the user about the next action after completing hg resolve | """Inform the user about the next action after completing hg resolve | ||||
If there's an unfinished operation that supports the continue flag, | If there's an unfinished operation that supports the continue flag, | ||||
howtocontinue will yield repo.ui.warn as the reporter. | howtocontinue will yield repo.ui.warn as the reporter. | ||||
Otherwise, it will yield repo.ui.note. | Otherwise, it will yield repo.ui.note. | ||||
''' | """ | ||||
msg, warning = howtocontinue(repo) | msg, warning = howtocontinue(repo) | ||||
if msg is not None: | if msg is not None: | ||||
if warning: | if warning: | ||||
repo.ui.warn(b"%s\n" % msg) | repo.ui.warn(b"%s\n" % msg) | ||||
else: | else: | ||||
repo.ui.note(b"%s\n" % msg) | repo.ui.note(b"%s\n" % msg) | ||||
def wrongtooltocontinue(repo, task): | def wrongtooltocontinue(repo, task): | ||||
'''Raise an abort suggesting how to properly continue if there is an | """Raise an abort suggesting how to properly continue if there is an | ||||
active task. | active task. | ||||
Uses howtocontinue() to find the active task. | Uses howtocontinue() to find the active task. | ||||
If there's no task (repo.ui.note for 'hg commit'), it does not offer | If there's no task (repo.ui.note for 'hg commit'), it does not offer | ||||
a hint. | a hint. | ||||
''' | """ | ||||
after = howtocontinue(repo) | after = howtocontinue(repo) | ||||
hint = None | hint = None | ||||
if after[1]: | if after[1]: | ||||
hint = after[0] | hint = after[0] | ||||
raise error.StateError(_(b'no %s in progress') % task, hint=hint) | raise error.StateError(_(b'no %s in progress') % task, hint=hint) | ||||
def abortgraft(ui, repo, graftstate): | def abortgraft(ui, repo, graftstate): |
(b't', b'type', b'', _(b'type of distribution to create'), _(b'TYPE')), | (b't', b'type', b'', _(b'type of distribution to create'), _(b'TYPE')), | ||||
] | ] | ||||
+ subrepoopts | + subrepoopts | ||||
+ walkopts, | + walkopts, | ||||
_(b'[OPTION]... DEST'), | _(b'[OPTION]... DEST'), | ||||
helpcategory=command.CATEGORY_IMPORT_EXPORT, | helpcategory=command.CATEGORY_IMPORT_EXPORT, | ||||
) | ) | ||||
def archive(ui, repo, dest, **opts): | def archive(ui, repo, dest, **opts): | ||||
'''create an unversioned archive of a repository revision | """create an unversioned archive of a repository revision | ||||
By default, the revision used is the parent of the working | By default, the revision used is the parent of the working | ||||
directory; use -r/--rev to specify a different revision. | directory; use -r/--rev to specify a different revision. | ||||
The archive type is automatically detected based on file | The archive type is automatically detected based on file | ||||
extension (to override, use -t/--type). | extension (to override, use -t/--type). | ||||
.. container:: verbose | .. container:: verbose | ||||
using a format string; see :hg:`help export` for details. | using a format string; see :hg:`help export` for details. | ||||
Each member added to an archive file has a directory prefix | Each member added to an archive file has a directory prefix | ||||
prepended. Use -p/--prefix to specify a format string for the | prepended. Use -p/--prefix to specify a format string for the | ||||
prefix. The default is the basename of the archive, with suffixes | prefix. The default is the basename of the archive, with suffixes | ||||
removed. | removed. | ||||
Returns 0 on success. | Returns 0 on success. | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
rev = opts.get(b'rev') | rev = opts.get(b'rev') | ||||
if rev: | if rev: | ||||
repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn') | ||||
ctx = scmutil.revsingle(repo, rev) | ctx = scmutil.revsingle(repo, rev) | ||||
if not ctx: | if not ctx: | ||||
raise error.InputError( | raise error.InputError( | ||||
+ mergetoolopts | + mergetoolopts | ||||
+ walkopts | + walkopts | ||||
+ commitopts | + commitopts | ||||
+ commitopts2, | + commitopts2, | ||||
_(b'[OPTION]... [-r] REV'), | _(b'[OPTION]... [-r] REV'), | ||||
helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | ||||
) | ) | ||||
def backout(ui, repo, node=None, rev=None, **opts): | def backout(ui, repo, node=None, rev=None, **opts): | ||||
'''reverse effect of earlier changeset | """reverse effect of earlier changeset | ||||
Prepare a new changeset with the effect of REV undone in the | Prepare a new changeset with the effect of REV undone in the | ||||
current working directory. If no conflicts were encountered, | current working directory. If no conflicts were encountered, | ||||
it will be committed immediately. | it will be committed immediately. | ||||
If REV is the parent of the working directory, then this new changeset | If REV is the parent of the working directory, then this new changeset | ||||
is committed automatically (unless --no-commit is specified). | is committed automatically (unless --no-commit is specified). | ||||
See :hg:`help dates` for a list of formats valid for -d/--date. | See :hg:`help dates` for a list of formats valid for -d/--date. | ||||
See :hg:`help revert` for a way to restore files to the state | See :hg:`help revert` for a way to restore files to the state | ||||
of another revision. | of another revision. | ||||
Returns 0 on success, 1 if nothing to backout or there are unresolved | Returns 0 on success, 1 if nothing to backout or there are unresolved | ||||
files. | files. | ||||
''' | """ | ||||
with repo.wlock(), repo.lock(): | with repo.wlock(), repo.lock(): | ||||
return _dobackout(ui, repo, node, rev, **opts) | return _dobackout(ui, repo, node, rev, **opts) | ||||
def _dobackout(ui, repo, node=None, rev=None, **opts): | def _dobackout(ui, repo, node=None, rev=None, **opts): | ||||
cmdutil.check_incompatible_arguments(opts, 'no_commit', ['commit', 'merge']) | cmdutil.check_incompatible_arguments(opts, 'no_commit', ['commit', 'merge']) | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
(b'i', b'inactive', False, _(b'mark a bookmark inactive')), | (b'i', b'inactive', False, _(b'mark a bookmark inactive')), | ||||
(b'l', b'list', False, _(b'list existing bookmarks')), | (b'l', b'list', False, _(b'list existing bookmarks')), | ||||
] | ] | ||||
+ formatteropts, | + formatteropts, | ||||
_(b'hg bookmarks [OPTIONS]... [NAME]...'), | _(b'hg bookmarks [OPTIONS]... [NAME]...'), | ||||
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | helpcategory=command.CATEGORY_CHANGE_ORGANIZATION, | ||||
) | ) | ||||
def bookmark(ui, repo, *names, **opts): | def bookmark(ui, repo, *names, **opts): | ||||
'''create a new bookmark or list existing bookmarks | """create a new bookmark or list existing bookmarks | ||||
Bookmarks are labels on changesets to help track lines of development. | Bookmarks are labels on changesets to help track lines of development. | ||||
Bookmarks are unversioned and can be moved, renamed and deleted. | Bookmarks are unversioned and can be moved, renamed and deleted. | ||||
Deleting or moving a bookmark has no effect on the associated changesets. | Deleting or moving a bookmark has no effect on the associated changesets. | ||||
Creating or updating to a bookmark causes it to be marked as 'active'. | Creating or updating to a bookmark causes it to be marked as 'active'. | ||||
The active bookmark is indicated with a '*'. | The active bookmark is indicated with a '*'. | ||||
When a commit is made, the active bookmark will advance to the new commit. | When a commit is made, the active bookmark will advance to the new commit. | ||||
- move the '@' bookmark from another branch:: | - move the '@' bookmark from another branch:: | ||||
hg book -f @ | hg book -f @ | ||||
- print only the active bookmark name:: | - print only the active bookmark name:: | ||||
hg book -ql . | hg book -ql . | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
force = opts.get(b'force') | force = opts.get(b'force') | ||||
rev = opts.get(b'rev') | rev = opts.get(b'rev') | ||||
inactive = opts.get(b'inactive') # meaning add/rename to inactive bookmark | inactive = opts.get(b'inactive') # meaning add/rename to inactive bookmark | ||||
action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list') | action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list') | ||||
if action: | if action: | ||||
cmdutil.check_incompatible_arguments(opts, action, [b'rev']) | cmdutil.check_incompatible_arguments(opts, action, [b'rev']) | ||||
with ui.formatter(b'files', opts) as fm: | with ui.formatter(b'files', opts) as fm: | ||||
return cmdutil.files( | return cmdutil.files( | ||||
ui, ctx, m, uipathfn, fm, fmt, opts.get(b'subrepos') | ui, ctx, m, uipathfn, fm, fmt, opts.get(b'subrepos') | ||||
) | ) | ||||
@command( | @command( | ||||
b'forget', | b'forget', | ||||
[(b'i', b'interactive', None, _(b'use interactive mode')),] | [ | ||||
(b'i', b'interactive', None, _(b'use interactive mode')), | |||||
] | |||||
+ walkopts | + walkopts | ||||
+ dryrunopts, | + dryrunopts, | ||||
_(b'[OPTION]... FILE...'), | _(b'[OPTION]... FILE...'), | ||||
helpcategory=command.CATEGORY_WORKING_DIRECTORY, | helpcategory=command.CATEGORY_WORKING_DIRECTORY, | ||||
helpbasic=True, | helpbasic=True, | ||||
inferrepo=True, | inferrepo=True, | ||||
) | ) | ||||
def forget(ui, repo, *pats, **opts): | def forget(ui, repo, *pats, **opts): | ||||
] | ] | ||||
+ commitopts2 | + commitopts2 | ||||
+ mergetoolopts | + mergetoolopts | ||||
+ dryrunopts, | + dryrunopts, | ||||
_(b'[OPTION]... [-r REV]... REV...'), | _(b'[OPTION]... [-r REV]... REV...'), | ||||
helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, | ||||
) | ) | ||||
def graft(ui, repo, *revs, **opts): | def graft(ui, repo, *revs, **opts): | ||||
'''copy changes from other branches onto the current branch | """copy changes from other branches onto the current branch | ||||
This command uses Mercurial's merge logic to copy individual | This command uses Mercurial's merge logic to copy individual | ||||
changes from other branches without merging branches in the | changes from other branches without merging branches in the | ||||
history graph. This is sometimes known as 'backporting' or | history graph. This is sometimes known as 'backporting' or | ||||
'cherry-picking'. By default, graft will copy user, date, and | 'cherry-picking'. By default, graft will copy user, date, and | ||||
description from the source changesets. | description from the source changesets. | ||||
Changesets that are ancestors of the current revision, that have | Changesets that are ancestors of the current revision, that have | ||||
- land a feature branch as one changeset:: | - land a feature branch as one changeset:: | ||||
hg up -cr default | hg up -cr default | ||||
hg graft -r featureX --base "ancestor('featureX', 'default')" | hg graft -r featureX --base "ancestor('featureX', 'default')" | ||||
See :hg:`help revisions` for more about specifying revisions. | See :hg:`help revisions` for more about specifying revisions. | ||||
Returns 0 on successful completion, 1 if there are unresolved files. | Returns 0 on successful completion, 1 if there are unresolved files. | ||||
''' | """ | ||||
with repo.wlock(): | with repo.wlock(): | ||||
return _dograft(ui, repo, *revs, **opts) | return _dograft(ui, repo, *revs, **opts) | ||||
def _dograft(ui, repo, *revs, **opts): | def _dograft(ui, repo, *revs, **opts): | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
if revs and opts.get(b'rev'): | if revs and opts.get(b'rev'): | ||||
ui.warn( | ui.warn( | ||||
_(b'update to new branch head if new descendants were pulled'), | _(b'update to new branch head if new descendants were pulled'), | ||||
), | ), | ||||
( | ( | ||||
b'f', | b'f', | ||||
b'force', | b'force', | ||||
None, | None, | ||||
_(b'run even when remote repository is unrelated'), | _(b'run even when remote repository is unrelated'), | ||||
), | ), | ||||
(b'', b'confirm', None, _(b'confirm pull before applying changes'),), | ( | ||||
b'', | |||||
b'confirm', | |||||
None, | |||||
_(b'confirm pull before applying changes'), | |||||
), | |||||
( | ( | ||||
b'r', | b'r', | ||||
b'rev', | b'rev', | ||||
[], | [], | ||||
_(b'a remote changeset intended to be added'), | _(b'a remote changeset intended to be added'), | ||||
_(b'REV'), | _(b'REV'), | ||||
), | ), | ||||
(b'B', b'bookmark', [], _(b"bookmark to pull"), _(b'BOOKMARK')), | (b'B', b'bookmark', [], _(b"bookmark to pull"), _(b'BOOKMARK')), | ||||
Returns 0 if push was successful, 1 if nothing to push. | Returns 0 if push was successful, 1 if nothing to push. | ||||
""" | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
if opts.get(b'all_bookmarks'): | if opts.get(b'all_bookmarks'): | ||||
cmdutil.check_incompatible_arguments( | cmdutil.check_incompatible_arguments( | ||||
opts, b'all_bookmarks', [b'bookmark', b'rev'], | opts, | ||||
b'all_bookmarks', | |||||
[b'bookmark', b'rev'], | |||||
) | ) | ||||
opts[b'bookmark'] = list(repo._bookmarks) | opts[b'bookmark'] = list(repo._bookmarks) | ||||
if opts.get(b'bookmark'): | if opts.get(b'bookmark'): | ||||
ui.setconfig(b'bookmarks', b'pushing', opts[b'bookmark'], b'push') | ui.setconfig(b'bookmarks', b'pushing', opts[b'bookmark'], b'push') | ||||
for b in opts[b'bookmark']: | for b in opts[b'bookmark']: | ||||
# translate -B options to -r so changesets get pushed | # translate -B options to -r so changesets get pushed | ||||
b = repo._bookmarks.expandname(b) | b = repo._bookmarks.expandname(b) | ||||
elif not result and pushop.bkresult: | elif not result and pushop.bkresult: | ||||
result = 2 | result = 2 | ||||
return result | return result | ||||
@command( | @command( | ||||
b'recover', | b'recover', | ||||
[(b'', b'verify', False, b"run `hg verify` after successful recover"),], | [ | ||||
(b'', b'verify', False, b"run `hg verify` after successful recover"), | |||||
], | |||||
helpcategory=command.CATEGORY_MAINTENANCE, | helpcategory=command.CATEGORY_MAINTENANCE, | ||||
) | ) | ||||
def recover(ui, repo, **opts): | def recover(ui, repo, **opts): | ||||
"""roll back an interrupted transaction | """roll back an interrupted transaction | ||||
Recover from an interrupted commit or pull. | Recover from an interrupted commit or pull. | ||||
This command tries to fix the repository status after an | This command tries to fix the repository status after an | ||||
             ),
         ),
     ]
     + cmdutil.walkopts,
     _(b'hg shelve [OPTION]... [FILE]...'),
     helpcategory=command.CATEGORY_WORKING_DIRECTORY,
 )
 def shelve(ui, repo, *pats, **opts):
-    '''save and set aside changes from the working directory
+    """save and set aside changes from the working directory

     Shelving takes files that "hg status" reports as not clean, saves
     the modifications to a bundle (a shelved change), and reverts the
     files so that their state in the working directory becomes clean.

     To restore these changes to the working directory, using "hg
     unshelve"; this will work even if you switch to a different
     commit.

     branch. To specify a different name, use ``--name``.

     To see a list of existing shelved changes, use the ``--list``
     option. For each shelved change, this will print its name, age,
     and description; use ``--patch`` or ``--stat`` for more details.

     To delete specific shelved changes, use ``--delete``. To delete
     all shelved changes, use ``--cleanup``.
-    '''
+    """
     opts = pycompat.byteskwargs(opts)
     allowables = [
         (b'addremove', {b'create'}),  # 'create' is pseudo action
         (b'unknown', {b'create'}),
         (b'cleanup', {b'cleanup'}),
         # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
         (b'delete', {b'delete'}),
         (b'edit', {b'create'}),

         fn.condwrite(ui.verbose and v, b"ver", b"%s", v)
         if ui.verbose:
             fn.plain(b"\n")
         fn.end()
     fm.end()


 def loadcmdtable(ui, name, cmdtable):
-    """Load command functions from specified cmdtable
-    """
+    """Load command functions from specified cmdtable"""
     overrides = [cmd for cmd in cmdtable if cmd in table]
     if overrides:
         ui.warn(
             _(b"extension '%s' overrides commands: %s\n")
             % (name, b" ".join(overrides))
         )
     table.update(cmdtable)

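Most of the collection churn in the hunks above is the "magic trailing comma": when a bracketed collection, call, or parameter list already ends in a trailing comma, the newer black keeps it exploded with one element per line, even when it would fit on a single line. A minimal self-contained sketch of the rule, with invented values rather than anything taken from this diff:

# Magic trailing comma, illustrated with made-up values. The trailing
# comma after the last element tells newer black to keep the collection
# exploded, one element per line.

opt_before = (b'', b'confirm', None, 'confirm pull before applying changes',)

opt_after = (
    b'',
    b'confirm',
    None,
    'confirm pull before applying changes',
)

# Only the layout differs; the tuples are equal.
assert opt_before == opt_after

Removing the trailing comma is the escape hatch: without it, black will happily collapse the collection back onto one line when it fits.
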
         finally:
             signal.signal(signal.SIGINT, signal.SIG_IGN)
         # On KeyboardInterrupt, print error message and exit *after* SIGINT
         # handler removed.
         req.ui.error(_(b'interrupted!\n'))
         return -1

     def runcommand(self):
-        """ reads a list of \0 terminated arguments, executes
-        and writes the return code to the result channel """
+        """reads a list of \0 terminated arguments, executes
+        and writes the return code to the result channel"""
         from . import dispatch  # avoid cycle

         args = self._readlist()
         # copy the uis so changes (e.g. --config or --verbose) don't
         # persist between requests
         copiedui = self.ui.copy()
         uis = [copiedui]

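The docstring edits here and above follow two related rules in the new black: """ is preferred over ''', and the padding just inside the quotes is trimmed so the text hugs them (which is why the closing quotes now sit directly after the last word). A hedged before/after pair, with hypothetical function names that are not part of the diff:

# Hypothetical illustration of the docstring normalization.

def before():
    ''' reads a list of arguments, executes
    and writes the return code to the result channel '''

def after():
    """reads a list of arguments, executes
    and writes the return code to the result channel"""

# The stored docstrings differ only in the surrounding whitespace:
print(repr(before.__doc__))
print(repr(after.__doc__))
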
             p1.node(),
             p2.node(),
             user,
             ctx.date(),
             extra,
         )
         xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
         repo.hook(
-            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
+            b'pretxncommit',
+            throw=True,
+            node=hex(n),
+            parent1=xp1,
+            parent2=xp2,
         )
         # set the new commit is proper phase
         targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
         # prevent unmarking changesets as public on recommit
         waspublic = oldtip == repo.changelog.tiprev() and not repo[n].phase()
         if targetphase and not waspublic:

     if writechangesetcopy:
         files.update_copies_from_p1(ctx.p1copies())
         files.update_copies_from_p2(ctx.p2copies())
     return mn, files


 def _get_salvaged(repo, ms, ctx):
-    """ returns a list of salvaged files
+    """returns a list of salvaged files
     returns empty list if config option which process salvaged files are
-    not enabled """
+    not enabled"""
     salvaged = []
     copy_sd = repo.filecopiesmode == b'changeset-sidedata'
     if copy_sd and len(ctx.parents()) > 1:
         if ms.active():
             for fname in sorted(ms.allextras().keys()):
                 might_removed = ms.extras(fname).get(b'merge-removal-candidate')
                 if might_removed == b'yes':
                     if fname in ctx:

         files.mark_removed(f)
     mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)
     return mn


 def _filecommit(
-    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms,
+    repo,
+    fctx,
+    manifest1,
+    manifest2,
+    linkrev,
+    tr,
+    includecopymeta,
+    ms,
 ):
     """
     commit an individual file as part of a larger transaction

     input:
         fctx: a file context with the content we are trying to commit
         manifest1: manifest of changeset first parent

             message = l.rstrip()
             if l.startswith(b' '):
                 message = b"unexpected leading whitespace: %s" % message
             raise error.ConfigError(message, (b"%s:%d" % (src, line)))

     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
             fp = util.posixfile(path, b'rb')
-        assert getattr(fp, 'mode', 'rb') == 'rb', (
-            b'config files must be opened in binary mode, got fp=%r mode=%r'
-            % (fp, fp.mode,)
-        )
+        assert (
+            getattr(fp, 'mode', 'rb') == 'rb'
+        ), b'config files must be opened in binary mode, got fp=%r mode=%r' % (
+            fp,
+            fp.mode,
+        )
         dir = os.path.dirname(path)

         def include(rel, remap, sections):
             abs = os.path.normpath(os.path.join(dir, rel))
             self.read(abs, remap=remap, sections=sections)

     return f


 coreconfigitem = getitemregister(coreitems)


 def _registerdiffopts(section, configprefix=b''):
     coreconfigitem(
-        section, configprefix + b'nodates', default=False,
+        section,
+        configprefix + b'nodates',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'showfunc', default=False,
+        section,
+        configprefix + b'showfunc',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'unified', default=None,
+        section,
+        configprefix + b'unified',
+        default=None,
     )
     coreconfigitem(
-        section, configprefix + b'git', default=False,
+        section,
+        configprefix + b'git',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'ignorews', default=False,
+        section,
+        configprefix + b'ignorews',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'ignorewsamount', default=False,
+        section,
+        configprefix + b'ignorewsamount',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'ignoreblanklines', default=False,
+        section,
+        configprefix + b'ignoreblanklines',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'ignorewseol', default=False,
+        section,
+        configprefix + b'ignorewseol',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'nobinary', default=False,
+        section,
+        configprefix + b'nobinary',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'noprefix', default=False,
+        section,
+        configprefix + b'noprefix',
+        default=False,
     )
     coreconfigitem(
-        section, configprefix + b'word-diff', default=False,
+        section,
+        configprefix + b'word-diff',
+        default=False,
     )


 coreconfigitem(
-    b'alias', b'.*', default=dynamicdefault, generic=True,
+    b'alias',
+    b'.*',
+    default=dynamicdefault,
+    generic=True,
 )
 coreconfigitem(
-    b'auth', b'cookiefile', default=None,
+    b'auth',
+    b'cookiefile',
+    default=None,
 )
 _registerdiffopts(section=b'annotate')
 # bookmarks.pushing: internal hack for discovery
 coreconfigitem(
-    b'bookmarks', b'pushing', default=list,
+    b'bookmarks',
+    b'pushing',
+    default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
 coreconfigitem(
-    b'bundle', b'mainreporoot', default=b'',
+    b'bundle',
+    b'mainreporoot',
+    default=b'',
 )
 coreconfigitem(
-    b'censor', b'policy', default=b'abort', experimental=True,
+    b'censor',
+    b'policy',
+    default=b'abort',
+    experimental=True,
 )
 coreconfigitem(
-    b'chgserver', b'idletimeout', default=3600,
+    b'chgserver',
+    b'idletimeout',
+    default=3600,
 )
 coreconfigitem(
-    b'chgserver', b'skiphash', default=False,
+    b'chgserver',
+    b'skiphash',
+    default=False,
 )
 coreconfigitem(
-    b'cmdserver', b'log', default=None,
+    b'cmdserver',
+    b'log',
+    default=None,
 )
 coreconfigitem(
-    b'cmdserver', b'max-log-files', default=7,
+    b'cmdserver',
+    b'max-log-files',
+    default=7,
 )
 coreconfigitem(
-    b'cmdserver', b'max-log-size', default=b'1 MB',
+    b'cmdserver',
+    b'max-log-size',
+    default=b'1 MB',
 )
 coreconfigitem(
-    b'cmdserver', b'max-repo-cache', default=0, experimental=True,
+    b'cmdserver',
+    b'max-repo-cache',
+    default=0,
+    experimental=True,
 )
 coreconfigitem(
-    b'cmdserver', b'message-encodings', default=list,
+    b'cmdserver',
+    b'message-encodings',
+    default=list,
 )
 coreconfigitem(
     b'cmdserver',
     b'track-log',
     default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
 )
 coreconfigitem(
-    b'cmdserver', b'shutdown-on-interrupt', default=True,
+    b'cmdserver',
+    b'shutdown-on-interrupt',
+    default=True,
 )
 coreconfigitem(
-    b'color', b'.*', default=None, generic=True,
+    b'color',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'color', b'mode', default=b'auto',
+    b'color',
+    b'mode',
+    default=b'auto',
 )
 coreconfigitem(
-    b'color', b'pagermode', default=dynamicdefault,
+    b'color',
+    b'pagermode',
+    default=dynamicdefault,
 )
 coreconfigitem(
     b'command-templates',
     b'graphnode',
     default=None,
     alias=[(b'ui', b'graphnodetemplate')],
 )
 coreconfigitem(
-    b'command-templates', b'log', default=None, alias=[(b'ui', b'logtemplate')],
+    b'command-templates',
+    b'log',
+    default=None,
+    alias=[(b'ui', b'logtemplate')],
 )
 coreconfigitem(
     b'command-templates',
     b'mergemarker',
     default=(
         b'{node|short} '
         b'{ifeq(tags, "tip", "", '
         b'ifeq(tags, "", "", "{tags} "))}'
         b'{if(bookmarks, "{bookmarks} ")}'
         b'{ifeq(branch, "default", "", "{branch} ")}'
         b'- {author|user}: {desc|firstline}'
     ),
     alias=[(b'ui', b'mergemarkertemplate')],
 )
 coreconfigitem(
     b'command-templates',
     b'pre-merge-tool-output',
     default=None,
     alias=[(b'ui', b'pre-merge-tool-output-template')],
 )
 coreconfigitem(
-    b'command-templates', b'oneline-summary', default=None,
+    b'command-templates',
+    b'oneline-summary',
+    default=None,
 )
 coreconfigitem(
     b'command-templates',
     b'oneline-summary.*',
     default=dynamicdefault,
     generic=True,
 )
 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
 coreconfigitem(
-    b'commands', b'commit.post-status', default=False,
+    b'commands',
+    b'commit.post-status',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'grep.all-files', default=False, experimental=True,
+    b'commands',
+    b'grep.all-files',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'commands', b'merge.require-rev', default=False,
+    b'commands',
+    b'merge.require-rev',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'push.require-revs', default=False,
+    b'commands',
+    b'push.require-revs',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'resolve.confirm', default=False,
+    b'commands',
+    b'resolve.confirm',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'resolve.explicit-re-merge', default=False,
+    b'commands',
+    b'resolve.explicit-re-merge',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'resolve.mark-check', default=b'none',
+    b'commands',
+    b'resolve.mark-check',
+    default=b'none',
 )
 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
 coreconfigitem(
-    b'commands', b'show.aliasprefix', default=list,
+    b'commands',
+    b'show.aliasprefix',
+    default=list,
 )
 coreconfigitem(
-    b'commands', b'status.relative', default=False,
+    b'commands',
+    b'status.relative',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'status.skipstates', default=[], experimental=True,
+    b'commands',
+    b'status.skipstates',
+    default=[],
+    experimental=True,
 )
 coreconfigitem(
-    b'commands', b'status.terse', default=b'',
+    b'commands',
+    b'status.terse',
+    default=b'',
 )
 coreconfigitem(
-    b'commands', b'status.verbose', default=False,
+    b'commands',
+    b'status.verbose',
+    default=False,
 )
 coreconfigitem(
-    b'commands', b'update.check', default=None,
+    b'commands',
+    b'update.check',
+    default=None,
 )
 coreconfigitem(
-    b'commands', b'update.requiredest', default=False,
+    b'commands',
+    b'update.requiredest',
+    default=False,
 )
 coreconfigitem(
-    b'committemplate', b'.*', default=None, generic=True,
+    b'committemplate',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'convert', b'bzr.saverev', default=True,
+    b'convert',
+    b'bzr.saverev',
+    default=True,
 )
 coreconfigitem(
-    b'convert', b'cvsps.cache', default=True,
+    b'convert',
+    b'cvsps.cache',
+    default=True,
 )
 coreconfigitem(
-    b'convert', b'cvsps.fuzz', default=60,
+    b'convert',
+    b'cvsps.fuzz',
+    default=60,
 )
 coreconfigitem(
-    b'convert', b'cvsps.logencoding', default=None,
+    b'convert',
+    b'cvsps.logencoding',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'cvsps.mergefrom', default=None,
+    b'convert',
+    b'cvsps.mergefrom',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'cvsps.mergeto', default=None,
+    b'convert',
+    b'cvsps.mergeto',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'git.committeractions', default=lambda: [b'messagedifferent'],
+    b'convert',
+    b'git.committeractions',
+    default=lambda: [b'messagedifferent'],
 )
 coreconfigitem(
-    b'convert', b'git.extrakeys', default=list,
+    b'convert',
+    b'git.extrakeys',
+    default=list,
 )
 coreconfigitem(
-    b'convert', b'git.findcopiesharder', default=False,
+    b'convert',
+    b'git.findcopiesharder',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'git.remoteprefix', default=b'remote',
+    b'convert',
+    b'git.remoteprefix',
+    default=b'remote',
 )
 coreconfigitem(
-    b'convert', b'git.renamelimit', default=400,
+    b'convert',
+    b'git.renamelimit',
+    default=400,
 )
 coreconfigitem(
-    b'convert', b'git.saverev', default=True,
+    b'convert',
+    b'git.saverev',
+    default=True,
 )
 coreconfigitem(
-    b'convert', b'git.similarity', default=50,
+    b'convert',
+    b'git.similarity',
+    default=50,
 )
 coreconfigitem(
-    b'convert', b'git.skipsubmodules', default=False,
+    b'convert',
+    b'git.skipsubmodules',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'hg.clonebranches', default=False,
+    b'convert',
+    b'hg.clonebranches',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'hg.ignoreerrors', default=False,
+    b'convert',
+    b'hg.ignoreerrors',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'hg.preserve-hash', default=False,
+    b'convert',
+    b'hg.preserve-hash',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'hg.revs', default=None,
+    b'convert',
+    b'hg.revs',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'hg.saverev', default=False,
+    b'convert',
+    b'hg.saverev',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'hg.sourcename', default=None,
+    b'convert',
+    b'hg.sourcename',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'hg.startrev', default=None,
+    b'convert',
+    b'hg.startrev',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'hg.tagsbranch', default=b'default',
+    b'convert',
+    b'hg.tagsbranch',
+    default=b'default',
 )
 coreconfigitem(
-    b'convert', b'hg.usebranchnames', default=True,
+    b'convert',
+    b'hg.usebranchnames',
+    default=True,
 )
 coreconfigitem(
-    b'convert', b'ignoreancestorcheck', default=False, experimental=True,
+    b'convert',
+    b'ignoreancestorcheck',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'convert', b'localtimezone', default=False,
+    b'convert',
+    b'localtimezone',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'p4.encoding', default=dynamicdefault,
+    b'convert',
+    b'p4.encoding',
+    default=dynamicdefault,
 )
 coreconfigitem(
-    b'convert', b'p4.startrev', default=0,
+    b'convert',
+    b'p4.startrev',
+    default=0,
 )
 coreconfigitem(
-    b'convert', b'skiptags', default=False,
+    b'convert',
+    b'skiptags',
+    default=False,
 )
 coreconfigitem(
-    b'convert', b'svn.debugsvnlog', default=True,
+    b'convert',
+    b'svn.debugsvnlog',
+    default=True,
 )
 coreconfigitem(
-    b'convert', b'svn.trunk', default=None,
+    b'convert',
+    b'svn.trunk',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'svn.tags', default=None,
+    b'convert',
+    b'svn.tags',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'svn.branches', default=None,
+    b'convert',
+    b'svn.branches',
+    default=None,
 )
 coreconfigitem(
-    b'convert', b'svn.startrev', default=0,
+    b'convert',
+    b'svn.startrev',
+    default=0,
 )
 coreconfigitem(
-    b'debug', b'dirstate.delaywrite', default=0,
+    b'debug',
+    b'dirstate.delaywrite',
+    default=0,
 )
 coreconfigitem(
-    b'defaults', b'.*', default=None, generic=True,
+    b'defaults',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'devel', b'all-warnings', default=False,
+    b'devel',
+    b'all-warnings',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'bundle2.debug', default=False,
+    b'devel',
+    b'bundle2.debug',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'bundle.delta', default=b'',
+    b'devel',
+    b'bundle.delta',
+    default=b'',
 )
 coreconfigitem(
-    b'devel', b'cache-vfs', default=None,
+    b'devel',
+    b'cache-vfs',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'check-locks', default=False,
+    b'devel',
+    b'check-locks',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'check-relroot', default=False,
+    b'devel',
+    b'check-relroot',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'default-date', default=None,
+    b'devel',
+    b'default-date',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'deprec-warn', default=False,
+    b'devel',
+    b'deprec-warn',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'disableloaddefaultcerts', default=False,
+    b'devel',
+    b'disableloaddefaultcerts',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'warn-empty-changegroup', default=False,
+    b'devel',
+    b'warn-empty-changegroup',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'legacy.exchange', default=list,
+    b'devel',
+    b'legacy.exchange',
+    default=list,
 )
 coreconfigitem(
-    b'devel', b'persistent-nodemap', default=False,
+    b'devel',
+    b'persistent-nodemap',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'servercafile', default=b'',
+    b'devel',
+    b'servercafile',
+    default=b'',
 )
 coreconfigitem(
-    b'devel', b'serverexactprotocol', default=b'',
+    b'devel',
+    b'serverexactprotocol',
+    default=b'',
 )
 coreconfigitem(
-    b'devel', b'serverrequirecert', default=False,
+    b'devel',
+    b'serverrequirecert',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'strip-obsmarkers', default=True,
+    b'devel',
+    b'strip-obsmarkers',
+    default=True,
 )
 coreconfigitem(
-    b'devel', b'warn-config', default=None,
+    b'devel',
+    b'warn-config',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'warn-config-default', default=None,
+    b'devel',
+    b'warn-config-default',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'user.obsmarker', default=None,
+    b'devel',
+    b'user.obsmarker',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'warn-config-unknown', default=None,
+    b'devel',
+    b'warn-config-unknown',
+    default=None,
 )
 coreconfigitem(
-    b'devel', b'debug.copies', default=False,
+    b'devel',
+    b'debug.copies',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'debug.extensions', default=False,
+    b'devel',
+    b'debug.extensions',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'debug.repo-filters', default=False,
+    b'devel',
+    b'debug.repo-filters',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'debug.peer-request', default=False,
+    b'devel',
+    b'debug.peer-request',
+    default=False,
 )
 coreconfigitem(
-    b'devel', b'discovery.randomize', default=True,
+    b'devel',
+    b'discovery.randomize',
+    default=True,
 )
 _registerdiffopts(section=b'diff')
 coreconfigitem(
-    b'email', b'bcc', default=None,
+    b'email',
+    b'bcc',
+    default=None,
 )
 coreconfigitem(
-    b'email', b'cc', default=None,
+    b'email',
+    b'cc',
+    default=None,
 )
 coreconfigitem(
-    b'email', b'charsets', default=list,
+    b'email',
+    b'charsets',
+    default=list,
 )
 coreconfigitem(
-    b'email', b'from', default=None,
+    b'email',
+    b'from',
+    default=None,
 )
 coreconfigitem(
-    b'email', b'method', default=b'smtp',
+    b'email',
+    b'method',
+    default=b'smtp',
 )
 coreconfigitem(
-    b'email', b'reply-to', default=None,
+    b'email',
+    b'reply-to',
+    default=None,
 )
 coreconfigitem(
-    b'email', b'to', default=None,
+    b'email',
+    b'to',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'archivemetatemplate', default=dynamicdefault,
+    b'experimental',
+    b'archivemetatemplate',
+    default=dynamicdefault,
 )
 coreconfigitem(
-    b'experimental', b'auto-publish', default=b'publish',
+    b'experimental',
+    b'auto-publish',
+    default=b'publish',
 )
 coreconfigitem(
-    b'experimental', b'bundle-phases', default=False,
+    b'experimental',
+    b'bundle-phases',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'bundle2-advertise', default=True,
+    b'experimental',
+    b'bundle2-advertise',
+    default=True,
 )
 coreconfigitem(
-    b'experimental', b'bundle2-output-capture', default=False,
+    b'experimental',
+    b'bundle2-output-capture',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'bundle2.pushback', default=False,
+    b'experimental',
+    b'bundle2.pushback',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'bundle2lazylocking', default=False,
+    b'experimental',
+    b'bundle2lazylocking',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'bundlecomplevel', default=None,
+    b'experimental',
+    b'bundlecomplevel',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'bundlecomplevel.bzip2', default=None,
+    b'experimental',
+    b'bundlecomplevel.bzip2',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'bundlecomplevel.gzip', default=None,
+    b'experimental',
+    b'bundlecomplevel.gzip',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'bundlecomplevel.none', default=None,
+    b'experimental',
+    b'bundlecomplevel.none',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'bundlecomplevel.zstd', default=None,
+    b'experimental',
+    b'bundlecomplevel.zstd',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'changegroup3', default=False,
+    b'experimental',
+    b'changegroup3',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'cleanup-as-archived', default=False,
+    b'experimental',
+    b'cleanup-as-archived',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'clientcompressionengines', default=list,
+    b'experimental',
+    b'clientcompressionengines',
+    default=list,
 )
 coreconfigitem(
-    b'experimental', b'copytrace', default=b'on',
+    b'experimental',
+    b'copytrace',
+    default=b'on',
 )
 coreconfigitem(
-    b'experimental', b'copytrace.movecandidateslimit', default=100,
+    b'experimental',
+    b'copytrace.movecandidateslimit',
+    default=100,
 )
 coreconfigitem(
-    b'experimental', b'copytrace.sourcecommitlimit', default=100,
+    b'experimental',
+    b'copytrace.sourcecommitlimit',
+    default=100,
 )
 coreconfigitem(
-    b'experimental', b'copies.read-from', default=b"filelog-only",
+    b'experimental',
+    b'copies.read-from',
+    default=b"filelog-only",
 )
 coreconfigitem(
-    b'experimental', b'copies.write-to', default=b'filelog-only',
+    b'experimental',
+    b'copies.write-to',
+    default=b'filelog-only',
 )
 coreconfigitem(
-    b'experimental', b'crecordtest', default=None,
+    b'experimental',
+    b'crecordtest',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'directaccess', default=False,
+    b'experimental',
+    b'directaccess',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'directaccess.revnums', default=False,
+    b'experimental',
+    b'directaccess.revnums',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'editortmpinhg', default=False,
+    b'experimental',
+    b'editortmpinhg',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'evolution', default=list,
+    b'experimental',
+    b'evolution',
+    default=list,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.allowdivergence',
     default=False,
     alias=[(b'experimental', b'allowdivergence')],
 )
 coreconfigitem(
-    b'experimental', b'evolution.allowunstable', default=None,
+    b'experimental',
+    b'evolution.allowunstable',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'evolution.createmarkers', default=None,
+    b'experimental',
+    b'evolution.createmarkers',
+    default=None,
 )
 coreconfigitem(
     b'experimental',
     b'evolution.effect-flags',
     default=True,
     alias=[(b'experimental', b'effect-flags')],
 )
 coreconfigitem(
-    b'experimental', b'evolution.exchange', default=None,
+    b'experimental',
+    b'evolution.exchange',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'evolution.bundle-obsmarker', default=False,
+    b'experimental',
+    b'evolution.bundle-obsmarker',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'log.topo', default=False,
+    b'experimental',
+    b'log.topo',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'evolution.report-instabilities', default=True,
+    b'experimental',
+    b'evolution.report-instabilities',
+    default=True,
 )
 coreconfigitem(
-    b'experimental', b'evolution.track-operation', default=True,
+    b'experimental',
+    b'evolution.track-operation',
+    default=True,
 )
 # repo-level config to exclude a revset visibility
 #
 # The target use case is to use `share` to expose different subset of the same
 # repository, especially server side. See also `server.view`.
 coreconfigitem(
-    b'experimental', b'extra-filter-revs', default=None,
+    b'experimental',
+    b'extra-filter-revs',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'maxdeltachainspan', default=-1,
+    b'experimental',
+    b'maxdeltachainspan',
+    default=-1,
 )
 # tracks files which were undeleted (merge might delete them but we explicitly
 # kept/undeleted them) and creates new filenodes for them
 coreconfigitem(
-    b'experimental', b'merge-track-salvaged', default=False,
+    b'experimental',
+    b'merge-track-salvaged',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'mergetempdirprefix', default=None,
+    b'experimental',
+    b'mergetempdirprefix',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'mmapindexthreshold', default=None,
+    b'experimental',
+    b'mmapindexthreshold',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'narrow', default=False,
+    b'experimental',
+    b'narrow',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'nonnormalparanoidcheck', default=False,
+    b'experimental',
+    b'nonnormalparanoidcheck',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'exportableenviron', default=list,
+    b'experimental',
+    b'exportableenviron',
+    default=list,
 )
 coreconfigitem(
-    b'experimental', b'extendedheader.index', default=None,
+    b'experimental',
+    b'extendedheader.index',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'extendedheader.similarity', default=False,
+    b'experimental',
+    b'extendedheader.similarity',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'graphshorten', default=False,
+    b'experimental',
+    b'graphshorten',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'graphstyle.parent', default=dynamicdefault,
+    b'experimental',
+    b'graphstyle.parent',
+    default=dynamicdefault,
 )
 coreconfigitem(
-    b'experimental', b'graphstyle.missing', default=dynamicdefault,
+    b'experimental',
+    b'graphstyle.missing',
+    default=dynamicdefault,
 )
 coreconfigitem(
-    b'experimental', b'graphstyle.grandparent', default=dynamicdefault,
+    b'experimental',
+    b'graphstyle.grandparent',
+    default=dynamicdefault,
 )
 coreconfigitem(
-    b'experimental', b'hook-track-tags', default=False,
+    b'experimental',
+    b'hook-track-tags',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'httppeer.advertise-v2', default=False,
+    b'experimental',
+    b'httppeer.advertise-v2',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'httppeer.v2-encoder-order', default=None,
+    b'experimental',
+    b'httppeer.v2-encoder-order',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'httppostargs', default=False,
+    b'experimental',
+    b'httppostargs',
+    default=False,
 )
 coreconfigitem(b'experimental', b'nointerrupt', default=False)
 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
 coreconfigitem(
-    b'experimental', b'obsmarkers-exchange-debug', default=False,
+    b'experimental',
+    b'obsmarkers-exchange-debug',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'remotenames', default=False,
+    b'experimental',
+    b'remotenames',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'removeemptydirs', default=True,
+    b'experimental',
+    b'removeemptydirs',
+    default=True,
 )
 coreconfigitem(
-    b'experimental', b'revert.interactive.select-to-keep', default=False,
+    b'experimental',
+    b'revert.interactive.select-to-keep',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'revisions.prefixhexnode', default=False,
+    b'experimental',
+    b'revisions.prefixhexnode',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'revlogv2', default=None,
+    b'experimental',
+    b'revlogv2',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'revisions.disambiguatewithin', default=None,
+    b'experimental',
+    b'revisions.disambiguatewithin',
+    default=None,
 )
 coreconfigitem(
-    b'experimental', b'rust.index', default=False,
+    b'experimental',
+    b'rust.index',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
+    b'experimental',
+    b'server.filesdata.recommended-batch-size',
+    default=50000,
 )
 coreconfigitem(
     b'experimental',
     b'server.manifestdata.recommended-batch-size',
     default=100000,
 )
 coreconfigitem(
-    b'experimental', b'server.stream-narrow-clones', default=False,
+    b'experimental',
+    b'server.stream-narrow-clones',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'single-head-per-branch', default=False,
+    b'experimental',
+    b'single-head-per-branch',
+    default=False,
 )
 coreconfigitem(
     b'experimental',
     b'single-head-per-branch:account-closed-heads',
     default=False,
 )
 coreconfigitem(
-    b'experimental', b'sshserver.support-v2', default=False,
+    b'experimental',
+    b'sshserver.support-v2',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'sparse-read', default=False,
+    b'experimental',
+    b'sparse-read',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'sparse-read.density-threshold', default=0.50,
+    b'experimental',
+    b'sparse-read.density-threshold',
+    default=0.50,
 )
 coreconfigitem(
-    b'experimental', b'sparse-read.min-gap-size', default=b'65K',
+    b'experimental',
+    b'sparse-read.min-gap-size',
+    default=b'65K',
 )
 coreconfigitem(
-    b'experimental', b'treemanifest', default=False,
+    b'experimental',
+    b'treemanifest',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'update.atomic-file', default=False,
+    b'experimental',
+    b'update.atomic-file',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'sshpeer.advertise-v2', default=False,
+    b'experimental',
+    b'sshpeer.advertise-v2',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'web.apiserver', default=False,
+    b'experimental',
+    b'web.apiserver',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'web.api.http-v2', default=False,
+    b'experimental',
+    b'web.api.http-v2',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'web.api.debugreflect', default=False,
+    b'experimental',
+    b'web.api.debugreflect',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'worker.wdir-get-thread-safe', default=False,
+    b'experimental',
+    b'worker.wdir-get-thread-safe',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'worker.repository-upgrade', default=False,
+    b'experimental',
+    b'worker.repository-upgrade',
+    default=False,
 )
 coreconfigitem(
-    b'experimental', b'xdiff', default=False,
+    b'experimental',
+    b'xdiff',
+    default=False,
 )
 coreconfigitem(
-    b'extensions', b'.*', default=None, generic=True,
+    b'extensions',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'extdata', b'.*', default=None, generic=True,
+    b'extdata',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'format', b'bookmarks-in-store', default=False,
+    b'format',
+    b'bookmarks-in-store',
+    default=False,
 )
 coreconfigitem(
-    b'format', b'chunkcachesize', default=None, experimental=True,
+    b'format',
+    b'chunkcachesize',
+    default=None,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'dotencode', default=True,
+    b'format',
+    b'dotencode',
+    default=True,
 )
 coreconfigitem(
-    b'format', b'generaldelta', default=False, experimental=True,
+    b'format',
+    b'generaldelta',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'manifestcachesize', default=None, experimental=True,
+    b'format',
+    b'manifestcachesize',
+    default=None,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'maxchainlen', default=dynamicdefault, experimental=True,
+    b'format',
+    b'maxchainlen',
+    default=dynamicdefault,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'obsstore-version', default=None,
+    b'format',
+    b'obsstore-version',
+    default=None,
 )
 coreconfigitem(
-    b'format', b'sparse-revlog', default=True,
+    b'format',
+    b'sparse-revlog',
+    default=True,
 )
 coreconfigitem(
     b'format',
     b'revlog-compression',
     default=lambda: [b'zlib'],
     alias=[(b'experimental', b'format.compression')],
 )
 coreconfigitem(
-    b'format', b'usefncache', default=True,
+    b'format',
+    b'usefncache',
+    default=True,
 )
 coreconfigitem(
-    b'format', b'usegeneraldelta', default=True,
+    b'format',
+    b'usegeneraldelta',
+    default=True,
 )
 coreconfigitem(
-    b'format', b'usestore', default=True,
+    b'format',
+    b'usestore',
+    default=True,
 )
 # Right now, the only efficient implement of the nodemap logic is in Rust, so
 # the persistent nodemap feature needs to stay experimental as long as the Rust
 # extensions are an experimental feature.
 coreconfigitem(
     b'format', b'use-persistent-nodemap', default=False, experimental=True
 )
 coreconfigitem(
     b'format',
     b'exp-use-copies-side-data-changeset',
     default=False,
     experimental=True,
 )
 coreconfigitem(
-    b'format', b'exp-use-side-data', default=False, experimental=True,
+    b'format',
+    b'exp-use-side-data',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'exp-share-safe', default=False, experimental=True,
+    b'format',
+    b'exp-share-safe',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'format', b'internal-phase', default=False, experimental=True,
+    b'format',
+    b'internal-phase',
+    default=False,
+    experimental=True,
 )
 coreconfigitem(
-    b'fsmonitor', b'warn_when_unused', default=True,
+    b'fsmonitor',
+    b'warn_when_unused',
+    default=True,
 )
 coreconfigitem(
-    b'fsmonitor', b'warn_update_file_count', default=50000,
+    b'fsmonitor',
+    b'warn_update_file_count',
+    default=50000,
 )
 coreconfigitem(
-    b'fsmonitor', b'warn_update_file_count_rust', default=400000,
+    b'fsmonitor',
+    b'warn_update_file_count_rust',
+    default=400000,
 )
 coreconfigitem(
-    b'help', br'hidden-command\..*', default=False, generic=True,
+    b'help',
+    br'hidden-command\..*',
+    default=False,
+    generic=True,
 )
 coreconfigitem(
-    b'help', br'hidden-topic\..*', default=False, generic=True,
+    b'help',
+    br'hidden-topic\..*',
+    default=False,
+    generic=True,
 )
 coreconfigitem(
-    b'hooks', b'.*', default=dynamicdefault, generic=True,
+    b'hooks',
+    b'.*',
+    default=dynamicdefault,
+    generic=True,
 )
 coreconfigitem(
-    b'hgweb-paths', b'.*', default=list, generic=True,
+    b'hgweb-paths',
+    b'.*',
+    default=list,
+    generic=True,
 )
 coreconfigitem(
-    b'hostfingerprints', b'.*', default=list, generic=True,
+    b'hostfingerprints',
+    b'.*',
+    default=list,
+    generic=True,
 )
 coreconfigitem(
-    b'hostsecurity', b'ciphers', default=None,
+    b'hostsecurity',
+    b'ciphers',
+    default=None,
 )
 coreconfigitem(
-    b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
+    b'hostsecurity',
+    b'minimumprotocol',
+    default=dynamicdefault,
 )
 coreconfigitem(
     b'hostsecurity',
     b'.*:minimumprotocol$',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem(
-    b'hostsecurity', b'.*:ciphers$', default=dynamicdefault, generic=True,
+    b'hostsecurity',
+    b'.*:ciphers$',
+    default=dynamicdefault,
+    generic=True,
 )
 coreconfigitem(
-    b'hostsecurity', b'.*:fingerprints$', default=list, generic=True,
+    b'hostsecurity',
+    b'.*:fingerprints$',
+    default=list,
+    generic=True,
 )
 coreconfigitem(
-    b'hostsecurity', b'.*:verifycertsfile$', default=None, generic=True,
+    b'hostsecurity',
+    b'.*:verifycertsfile$',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
-    b'http_proxy', b'always', default=False,
+    b'http_proxy',
+    b'always',
+    default=False,
 )
 coreconfigitem(
-    b'http_proxy', b'host', default=None,
+    b'http_proxy',
+    b'host',
+    default=None,
 )
 coreconfigitem(
-    b'http_proxy', b'no', default=list,
+    b'http_proxy',
+    b'no',
+    default=list,
 )
 coreconfigitem(
-    b'http_proxy', b'passwd', default=None,
+    b'http_proxy',
+    b'passwd',
+    default=None,
 )
 coreconfigitem(
-    b'http_proxy', b'user', default=None,
+    b'http_proxy',
+    b'user',
+    default=None,
 )
 coreconfigitem(
-    b'http', b'timeout', default=None,
+    b'http',
+    b'timeout',
+    default=None,
 )
 coreconfigitem(
-    b'logtoprocess', b'commandexception', default=None,
+    b'logtoprocess',
+    b'commandexception',
+    default=None,
 )
 coreconfigitem(
-    b'logtoprocess', b'commandfinish', default=None,
+    b'logtoprocess',
+    b'commandfinish',
+    default=None,
 )
 coreconfigitem(
-    b'logtoprocess', b'command', default=None,
+    b'logtoprocess',
+    b'command',
+    default=None,
 )
 coreconfigitem(
-    b'logtoprocess', b'develwarn', default=None,
+    b'logtoprocess',
+    b'develwarn',
+    default=None,
 )
 coreconfigitem(
-    b'logtoprocess', b'uiblocked', default=None,
+    b'logtoprocess',
+    b'uiblocked',
+    default=None,
 )
 coreconfigitem(
-    b'merge', b'checkunknown', default=b'abort',
+    b'merge',
+    b'checkunknown',
+    default=b'abort',
 )
 coreconfigitem(
-    b'merge', b'checkignored', default=b'abort',
+    b'merge',
+    b'checkignored',
+    default=b'abort',
 )
 coreconfigitem(
-    b'experimental', b'merge.checkpathconflicts', default=False,
+    b'experimental',
+    b'merge.checkpathconflicts',
+    default=False,
 )
 coreconfigitem(
-    b'merge', b'followcopies', default=True,
+    b'merge',
+    b'followcopies',
+    default=True,
 )
 coreconfigitem(
-    b'merge', b'on-failure', default=b'continue',
+    b'merge',
+    b'on-failure',
+    default=b'continue',
 )
 coreconfigitem(
-    b'merge', b'preferancestor', default=lambda: [b'*'], experimental=True,
+    b'merge',
+    b'preferancestor',
+    default=lambda: [b'*'],
+    experimental=True,
 )
 coreconfigitem(
-    b'merge', b'strict-capability-check', default=False,
+    b'merge',
+    b'strict-capability-check',
+    default=False,
 )
 coreconfigitem(
-    b'merge-tools', b'.*', default=None, generic=True,
+    b'merge-tools',
+    b'.*',
+    default=None,
+    generic=True,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.args$',
     default=b"$local $base $other",
     generic=True,
     priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.binary$', default=False, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.binary$',
+    default=False,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.check$', default=list, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.check$',
+    default=list,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.checkchanged$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.executable$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.fixeol$', default=False, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.fixeol$',
+    default=False,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.gui$', default=False, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.gui$',
+    default=False,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkers$',
     default=b'basic',
     generic=True,
     priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.mergemarkertemplate$',
     default=dynamicdefault,  # take from command-templates.mergemarker
     generic=True,
     priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.priority$', default=0, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.priority$',
+    default=0,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
     b'merge-tools',
     br'.*\.premerge$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem(
-    b'merge-tools', br'.*\.symlink$', default=False, generic=True, priority=-1,
+    b'merge-tools',
+    br'.*\.symlink$',
+    default=False,
+    generic=True,
+    priority=-1,
 )
 coreconfigitem(
-    b'pager', b'attend-.*', default=dynamicdefault, generic=True,
+    b'pager',
+    b'attend-.*',
+    default=dynamicdefault,
+    generic=True,
 )
 coreconfigitem(
-    b'pager', b'ignore', default=list,
+    b'pager',
+    b'ignore',
+    default=list,
 )
coreconfigitem( | coreconfigitem( | ||||
b'pager', b'pager', default=dynamicdefault, | b'pager', | ||||
b'pager', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'patch', b'eol', default=b'strict', | b'patch', | ||||
b'eol', | |||||
default=b'strict', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'patch', b'fuzz', default=2, | b'patch', | ||||
b'fuzz', | |||||
default=2, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'paths', b'default', default=None, | b'paths', | ||||
b'default', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'paths', b'default-push', default=None, | b'paths', | ||||
b'default-push', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'paths', b'.*', default=None, generic=True, | b'paths', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'phases', b'checksubrepos', default=b'follow', | b'phases', | ||||
b'checksubrepos', | |||||
default=b'follow', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'phases', b'new-commit', default=b'draft', | b'phases', | ||||
b'new-commit', | |||||
default=b'draft', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'phases', b'publish', default=True, | b'phases', | ||||
b'publish', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'enabled', default=False, | b'profiling', | ||||
b'enabled', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'format', default=b'text', | b'profiling', | ||||
b'format', | |||||
default=b'text', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'freq', default=1000, | b'profiling', | ||||
b'freq', | |||||
default=1000, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'limit', default=30, | b'profiling', | ||||
b'limit', | |||||
default=30, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'nested', default=0, | b'profiling', | ||||
b'nested', | |||||
default=0, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'output', default=None, | b'profiling', | ||||
b'output', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'showmax', default=0.999, | b'profiling', | ||||
b'showmax', | |||||
default=0.999, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'showmin', default=dynamicdefault, | b'profiling', | ||||
b'showmin', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'showtime', default=True, | b'profiling', | ||||
b'showtime', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'sort', default=b'inlinetime', | b'profiling', | ||||
b'sort', | |||||
default=b'inlinetime', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'statformat', default=b'hotpath', | b'profiling', | ||||
b'statformat', | |||||
default=b'hotpath', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'time-track', default=dynamicdefault, | b'profiling', | ||||
b'time-track', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'profiling', b'type', default=b'stat', | b'profiling', | ||||
b'type', | |||||
default=b'stat', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'assume-tty', default=False, | b'progress', | ||||
b'assume-tty', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'changedelay', default=1, | b'progress', | ||||
b'changedelay', | |||||
default=1, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'clear-complete', default=True, | b'progress', | ||||
b'clear-complete', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'debug', default=False, | b'progress', | ||||
b'debug', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'delay', default=3, | b'progress', | ||||
b'delay', | |||||
default=3, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'disable', default=False, | b'progress', | ||||
b'disable', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'estimateinterval', default=60.0, | b'progress', | ||||
b'estimateinterval', | |||||
default=60.0, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', | b'progress', | ||||
b'format', | b'format', | ||||
default=lambda: [b'topic', b'bar', b'number', b'estimate'], | default=lambda: [b'topic', b'bar', b'number', b'estimate'], | ||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'refresh', default=0.1, | b'progress', | ||||
b'refresh', | |||||
default=0.1, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'progress', b'width', default=dynamicdefault, | b'progress', | ||||
b'width', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'pull', b'confirm', default=False, | b'pull', | ||||
b'confirm', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'push', b'pushvars.server', default=False, | b'push', | ||||
b'pushvars.server', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'rewrite', | b'rewrite', | ||||
b'backup-bundle', | b'backup-bundle', | ||||
default=True, | default=True, | ||||
alias=[(b'ui', b'history-editing-backup')], | alias=[(b'ui', b'history-editing-backup')], | ||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'rewrite', b'update-timestamp', default=False, | b'rewrite', | ||||
b'update-timestamp', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'rewrite', b'empty-successor', default=b'skip', experimental=True, | b'rewrite', | ||||
b'empty-successor', | |||||
default=b'skip', | |||||
experimental=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True, | b'storage', | ||||
b'new-repo-backend', | |||||
default=b'revlogv1', | |||||
experimental=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', | b'storage', | ||||
b'revlog.optimize-delta-parent-choice', | b'revlog.optimize-delta-parent-choice', | ||||
default=True, | default=True, | ||||
alias=[(b'format', b'aggressivemergedeltas')], | alias=[(b'format', b'aggressivemergedeltas')], | ||||
) | ) | ||||
# experimental as long as rust is experimental (or a C version is implemented) | # experimental as long as rust is experimental (or a C version is implemented) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.nodemap.mmap', default=True, experimental=True | b'storage', b'revlog.nodemap.mmap', default=True, experimental=True | ||||
) | ) | ||||
# experimental as long as format.use-persistent-nodemap is. | # experimental as long as format.use-persistent-nodemap is. | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True | b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True | ||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.reuse-external-delta', default=True, | b'storage', | ||||
b'revlog.reuse-external-delta', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.reuse-external-delta-parent', default=None, | b'storage', | ||||
b'revlog.reuse-external-delta-parent', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.zlib.level', default=None, | b'storage', | ||||
b'revlog.zlib.level', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'storage', b'revlog.zstd.level', default=None, | b'storage', | ||||
b'revlog.zstd.level', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bookmarks-pushkey-compat', default=True, | b'server', | ||||
b'bookmarks-pushkey-compat', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1', default=True, | b'server', | ||||
b'bundle1', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1gd', default=None, | b'server', | ||||
b'bundle1gd', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1.pull', default=None, | b'server', | ||||
b'bundle1.pull', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1gd.pull', default=None, | b'server', | ||||
b'bundle1gd.pull', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1.push', default=None, | b'server', | ||||
b'bundle1.push', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'bundle1gd.push', default=None, | b'server', | ||||
b'bundle1gd.push', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', | b'server', | ||||
b'bundle2.stream', | b'bundle2.stream', | ||||
default=True, | default=True, | ||||
alias=[(b'experimental', b'bundle2.stream')], | alias=[(b'experimental', b'bundle2.stream')], | ||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'compressionengines', default=list, | b'server', | ||||
b'compressionengines', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'concurrent-push-mode', default=b'check-related', | b'server', | ||||
b'concurrent-push-mode', | |||||
default=b'check-related', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'disablefullbundle', default=False, | b'server', | ||||
b'disablefullbundle', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'maxhttpheaderlen', default=1024, | b'server', | ||||
b'maxhttpheaderlen', | |||||
default=1024, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'pullbundle', default=False, | b'server', | ||||
b'pullbundle', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'preferuncompressed', default=False, | b'server', | ||||
b'preferuncompressed', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'streamunbundle', default=False, | b'server', | ||||
b'streamunbundle', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'uncompressed', default=True, | b'server', | ||||
b'uncompressed', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'uncompressedallowsecret', default=False, | b'server', | ||||
b'uncompressedallowsecret', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'view', default=b'served', | b'server', | ||||
b'view', | |||||
default=b'served', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'validate', default=False, | b'server', | ||||
b'validate', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'zliblevel', default=-1, | b'server', | ||||
b'zliblevel', | |||||
default=-1, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'server', b'zstdlevel', default=3, | b'server', | ||||
b'zstdlevel', | |||||
default=3, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'share', b'pool', default=None, | b'share', | ||||
b'pool', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'share', b'poolnaming', default=b'identity', | b'share', | ||||
b'poolnaming', | |||||
default=b'identity', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'shelve', b'maxbackups', default=10, | b'shelve', | ||||
b'maxbackups', | |||||
default=10, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'host', default=None, | b'smtp', | ||||
b'host', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'local_hostname', default=None, | b'smtp', | ||||
b'local_hostname', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'password', default=None, | b'smtp', | ||||
b'password', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'port', default=dynamicdefault, | b'smtp', | ||||
b'port', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'tls', default=b'none', | b'smtp', | ||||
b'tls', | |||||
default=b'none', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'smtp', b'username', default=None, | b'smtp', | ||||
b'username', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'sparse', b'missingwarning', default=True, experimental=True, | b'sparse', | ||||
b'missingwarning', | |||||
default=True, | |||||
experimental=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'subrepos', | b'subrepos', | ||||
b'allowed', | b'allowed', | ||||
default=dynamicdefault, # to make backporting simpler | default=dynamicdefault, # to make backporting simpler | ||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'subrepos', b'hg:allowed', default=dynamicdefault, | b'subrepos', | ||||
b'hg:allowed', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'subrepos', b'git:allowed', default=dynamicdefault, | b'subrepos', | ||||
b'git:allowed', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'subrepos', b'svn:allowed', default=dynamicdefault, | b'subrepos', | ||||
b'svn:allowed', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'templates', b'.*', default=None, generic=True, | b'templates', | ||||
b'.*', | |||||
default=None, | |||||
generic=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'templateconfig', b'.*', default=dynamicdefault, generic=True, | b'templateconfig', | ||||
b'.*', | |||||
default=dynamicdefault, | |||||
generic=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'trusted', b'groups', default=list, | b'trusted', | ||||
b'groups', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'trusted', b'users', default=list, | b'trusted', | ||||
b'users', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'_usedassubrepo', default=False, | b'ui', | ||||
b'_usedassubrepo', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'allowemptycommit', default=False, | b'ui', | ||||
b'allowemptycommit', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'archivemeta', default=True, | b'ui', | ||||
b'archivemeta', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'askusername', default=False, | b'ui', | ||||
b'askusername', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'available-memory', default=None, | b'ui', | ||||
b'available-memory', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'clonebundlefallback', default=False, | b'ui', | ||||
b'clonebundlefallback', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'clonebundleprefers', default=list, | b'ui', | ||||
b'clonebundleprefers', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'clonebundles', default=True, | b'ui', | ||||
b'clonebundles', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'color', default=b'auto', | b'ui', | ||||
b'color', | |||||
default=b'auto', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'commitsubrepos', default=False, | b'ui', | ||||
b'commitsubrepos', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'debug', default=False, | b'ui', | ||||
b'debug', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'debugger', default=None, | b'ui', | ||||
b'debugger', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'editor', default=dynamicdefault, | b'ui', | ||||
b'editor', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'detailed-exit-code', default=False, experimental=True, | b'ui', | ||||
b'detailed-exit-code', | |||||
default=False, | |||||
experimental=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'fallbackencoding', default=None, | b'ui', | ||||
b'fallbackencoding', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'forcecwd', default=None, | b'ui', | ||||
b'forcecwd', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'forcemerge', default=None, | b'ui', | ||||
b'forcemerge', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'formatdebug', default=False, | b'ui', | ||||
b'formatdebug', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'formatjson', default=False, | b'ui', | ||||
b'formatjson', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'formatted', default=None, | b'ui', | ||||
b'formatted', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'interactive', default=None, | b'ui', | ||||
b'interactive', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'interface', default=None, | b'ui', | ||||
b'interface', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'interface.chunkselector', default=None, | b'ui', | ||||
b'interface.chunkselector', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'large-file-limit', default=10000000, | b'ui', | ||||
b'large-file-limit', | |||||
default=10000000, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'logblockedtimes', default=False, | b'ui', | ||||
b'logblockedtimes', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'merge', default=None, | b'ui', | ||||
b'merge', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'mergemarkers', default=b'basic', | b'ui', | ||||
b'mergemarkers', | |||||
default=b'basic', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'message-output', default=b'stdio', | b'ui', | ||||
b'message-output', | |||||
default=b'stdio', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'nontty', default=False, | b'ui', | ||||
b'nontty', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'origbackuppath', default=None, | b'ui', | ||||
b'origbackuppath', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'paginate', default=True, | b'ui', | ||||
b'paginate', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'patch', default=None, | b'ui', | ||||
b'patch', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'portablefilenames', default=b'warn', | b'ui', | ||||
b'portablefilenames', | |||||
default=b'warn', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'promptecho', default=False, | b'ui', | ||||
b'promptecho', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'quiet', default=False, | b'ui', | ||||
b'quiet', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'quietbookmarkmove', default=False, | b'ui', | ||||
b'quietbookmarkmove', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'relative-paths', default=b'legacy', | b'ui', | ||||
b'relative-paths', | |||||
default=b'legacy', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'remotecmd', default=b'hg', | b'ui', | ||||
b'remotecmd', | |||||
default=b'hg', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'report_untrusted', default=True, | b'ui', | ||||
b'report_untrusted', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'rollback', default=True, | b'ui', | ||||
b'rollback', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'signal-safe-lock', default=True, | b'ui', | ||||
b'signal-safe-lock', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'slash', default=False, | b'ui', | ||||
b'slash', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'ssh', default=b'ssh', | b'ui', | ||||
b'ssh', | |||||
default=b'ssh', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'ssherrorhint', default=None, | b'ui', | ||||
b'ssherrorhint', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'statuscopies', default=False, | b'ui', | ||||
b'statuscopies', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'strict', default=False, | b'ui', | ||||
b'strict', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'style', default=b'', | b'ui', | ||||
b'style', | |||||
default=b'', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'supportcontact', default=None, | b'ui', | ||||
b'supportcontact', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'textwidth', default=78, | b'ui', | ||||
b'textwidth', | |||||
default=78, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'timeout', default=b'600', | b'ui', | ||||
b'timeout', | |||||
default=b'600', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'timeout.warn', default=0, | b'ui', | ||||
b'timeout.warn', | |||||
default=0, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'timestamp-output', default=False, | b'ui', | ||||
b'timestamp-output', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'traceback', default=False, | b'ui', | ||||
b'traceback', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'tweakdefaults', default=False, | b'ui', | ||||
b'tweakdefaults', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')]) | coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')]) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'ui', b'verbose', default=False, | b'ui', | ||||
b'verbose', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'verify', b'skipflags', default=None, | b'verify', | ||||
b'skipflags', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allowbz2', default=False, | b'web', | ||||
b'allowbz2', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allowgz', default=False, | b'web', | ||||
b'allowgz', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allow-pull', alias=[(b'web', b'allowpull')], default=True, | b'web', | ||||
b'allow-pull', | |||||
alias=[(b'web', b'allowpull')], | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allow-push', alias=[(b'web', b'allow_push')], default=list, | b'web', | ||||
b'allow-push', | |||||
alias=[(b'web', b'allow_push')], | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allowzip', default=False, | b'web', | ||||
b'allowzip', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'archivesubrepos', default=False, | b'web', | ||||
b'archivesubrepos', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'cache', default=True, | b'web', | ||||
b'cache', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'comparisoncontext', default=5, | b'web', | ||||
b'comparisoncontext', | |||||
default=5, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'contact', default=None, | b'web', | ||||
b'contact', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'deny_push', default=list, | b'web', | ||||
b'deny_push', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'guessmime', default=False, | b'web', | ||||
b'guessmime', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'hidden', default=False, | b'web', | ||||
b'hidden', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'labels', default=list, | b'web', | ||||
b'labels', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'logoimg', default=b'hglogo.png', | b'web', | ||||
b'logoimg', | |||||
default=b'hglogo.png', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'logourl', default=b'https://mercurial-scm.org/', | b'web', | ||||
b'logourl', | |||||
default=b'https://mercurial-scm.org/', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'accesslog', default=b'-', | b'web', | ||||
b'accesslog', | |||||
default=b'-', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'address', default=b'', | b'web', | ||||
b'address', | |||||
default=b'', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allow-archive', alias=[(b'web', b'allow_archive')], default=list, | b'web', | ||||
b'allow-archive', | |||||
alias=[(b'web', b'allow_archive')], | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'allow_read', default=list, | b'web', | ||||
b'allow_read', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'baseurl', default=None, | b'web', | ||||
b'baseurl', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'cacerts', default=None, | b'web', | ||||
b'cacerts', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'certificate', default=None, | b'web', | ||||
b'certificate', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'collapse', default=False, | b'web', | ||||
b'collapse', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'csp', default=None, | b'web', | ||||
b'csp', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'deny_read', default=list, | b'web', | ||||
b'deny_read', | |||||
default=list, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'descend', default=True, | b'web', | ||||
b'descend', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'description', default=b"", | b'web', | ||||
b'description', | |||||
default=b"", | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'encoding', default=lambda: encoding.encoding, | b'web', | ||||
b'encoding', | |||||
default=lambda: encoding.encoding, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'errorlog', default=b'-', | b'web', | ||||
b'errorlog', | |||||
default=b'-', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'ipv6', default=False, | b'web', | ||||
b'ipv6', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'maxchanges', default=10, | b'web', | ||||
b'maxchanges', | |||||
default=10, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'maxfiles', default=10, | b'web', | ||||
b'maxfiles', | |||||
default=10, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'maxshortchanges', default=60, | b'web', | ||||
b'maxshortchanges', | |||||
default=60, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'motd', default=b'', | b'web', | ||||
b'motd', | |||||
default=b'', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'name', default=dynamicdefault, | b'web', | ||||
b'name', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'port', default=8000, | b'web', | ||||
b'port', | |||||
default=8000, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'prefix', default=b'', | b'web', | ||||
b'prefix', | |||||
default=b'', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'push_ssl', default=True, | b'web', | ||||
b'push_ssl', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'refreshinterval', default=20, | b'web', | ||||
b'refreshinterval', | |||||
default=20, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'server-header', default=None, | b'web', | ||||
b'server-header', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'static', default=None, | b'web', | ||||
b'static', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'staticurl', default=None, | b'web', | ||||
b'staticurl', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'stripes', default=1, | b'web', | ||||
b'stripes', | |||||
default=1, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'style', default=b'paper', | b'web', | ||||
b'style', | |||||
default=b'paper', | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'templates', default=None, | b'web', | ||||
b'templates', | |||||
default=None, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'web', b'view', default=b'served', experimental=True, | b'web', | ||||
b'view', | |||||
default=b'served', | |||||
experimental=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'backgroundclose', default=dynamicdefault, | b'worker', | ||||
b'backgroundclose', | |||||
default=dynamicdefault, | |||||
) | ) | ||||
# Windows defaults to a limit of 512 open files. A buffer of 128 | # Windows defaults to a limit of 512 open files. A buffer of 128 | ||||
# should give us enough headway. | # should give us enough headway. | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'backgroundclosemaxqueue', default=384, | b'worker', | ||||
b'backgroundclosemaxqueue', | |||||
default=384, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'backgroundcloseminfilecount', default=2048, | b'worker', | ||||
b'backgroundcloseminfilecount', | |||||
default=2048, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'backgroundclosethreadcount', default=4, | b'worker', | ||||
b'backgroundclosethreadcount', | |||||
default=4, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'enabled', default=True, | b'worker', | ||||
b'enabled', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'worker', b'numcpus', default=None, | b'worker', | ||||
b'numcpus', | |||||
default=None, | |||||
) | ) | ||||
# Rebase related configuration moved to core because other extension are doing | # Rebase related configuration moved to core because other extension are doing | ||||
# strange things. For example, shelve import the extensions to reuse some bit | # strange things. For example, shelve import the extensions to reuse some bit | ||||
# without formally loading it. | # without formally loading it. | ||||
coreconfigitem( | coreconfigitem( | ||||
b'commands', b'rebase.requiredest', default=False, | b'commands', | ||||
b'rebase.requiredest', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'experimental', b'rebaseskipobsolete', default=True, | b'experimental', | ||||
b'rebaseskipobsolete', | |||||
default=True, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'rebase', b'singletransaction', default=False, | b'rebase', | ||||
b'singletransaction', | |||||
default=False, | |||||
) | ) | ||||
coreconfigitem( | coreconfigitem( | ||||
b'rebase', b'experimental.inmemory', default=False, | b'rebase', | ||||
b'experimental.inmemory', | |||||
default=False, | |||||
) | ) |
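Every coreconfigitem() reflow above comes from the same upstream rule: newer black treats a trailing comma inside brackets as a request to keep the collection exploded, one element per line, where the old release collapsed such calls back onto one line. A minimal sketch of the behavior; the stub coreconfigitem below is hypothetical, standing in for Mercurial's real registration helper so the snippet runs on its own:

def coreconfigitem(section, name, default=None, **kwargs):
    # Hypothetical stub mimicking mercurial.configitems.coreconfigitem's
    # signature; it exists only to make this example self-contained.
    return (section, name, default, kwargs)


# Old black collapsed this call despite the trailing comma:
#     coreconfigitem(b'ui', b'textwidth', default=78,)
# Newer black's "magic trailing comma" keeps it exploded instead:
coreconfigitem(
    b'ui',
    b'textwidth',
    default=78,
)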
    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',

                return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result is crash somewhere else at to some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:

        # behavior" is seen as better as "crash"
        #
        # Linkrevs have several serious troubles with filtering that are
        # complicated to solve. Proper handling of the issue here should be
        # considered when solving linkrev issue are on the table.
        return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

        ]

    def children(self):
        return []


class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed


def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
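The context.py hunks above show the review's other recurring change: black now rewrites '''-quoted docstrings to the PEP 257-preferred """ form, and folds a docstring whose closing quotes sat alone on the next line back into a single line. A before/after sketch with a hypothetical function (not taken from the diff):

# Before: triple single quotes, closing quotes on their own line.
def introduced_after_old(rev, floor):
    '''True if rev was introduced after a given floor revision
    '''
    return rev >= floor


# After: double quotes, and the one-line docstring folded together.
def introduced_after_new(rev, floor):
    """True if rev was introduced after a given floor revision"""
    return rev >= floor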
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    else:
        return _fullcopytracing(repo, c1, c2, base)


def _isfullcopytraceable(repo, c1, base):
    """Checks that if base, source and destination are all no-public branches,
    if yes let's use the full copytrace algorithm for increased capabilities
    since it will be fast enough.

    `experimental.copytrace.sourcecommitlimit` can be used to set a limit for
    number of changesets from c1 to base such that if number of changesets are
    more than the limit, full copytracing algorithm won't be used.
    """
    if c1.rev() is None:

        self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
    ):
        self.copy = {} if copy is None else copy
        self.renamedelete = {} if renamedelete is None else renamedelete
        self.dirmove = {} if dirmove is None else dirmove
        self.movewithdir = {} if movewithdir is None else movewithdir

    def __repr__(self):
        return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
            self.copy,
            self.renamedelete,
            self.dirmove,
            self.movewithdir,
        )


def _fullcopytracing(repo, c1, c2, base):
    """The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track all
    the copies.
    """
    m1 = c1.manifest()
    m2 = c2.manifest()

                        % (f, df)
                    )
                    break

    return dirmove, movewithdir


def _heuristicscopytracing(repo, c1, c2, base):
    """Fast copytracing using filename heuristics

    Assumes that moves or renames are of following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    """
        for header in self.headerlist:
            for hunk in header.allchildren():
                for line in hunk.allchildren():
                    self.toggleapply(line)

    def toggleallbetween(self):
        """toggle applied on or off for all items in range [lastapplied,
        current]."""
        if (
            not self.lastapplieditem
            or self.currentselecteditem == self.lastapplieditem
        ):
            # Treat this like a normal 'x'/' '
            self.toggleapply()
            return
def _decoratelines(text, fctx):
    n = _countlines(text)
    linenos = pycompat.rangelist(1, n + 1)
    return _annotatedfile([fctx] * n, linenos, [False] * n, text)


def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r"""
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    """
    pblocks = [
        (parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
        for parent in parents
    ]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    addspaces=True,
    wraplabels=False,
    wrapannotations=False,
    wrapcommands=False,
    wrapnonlinear=False,
    usedots=False,
    maxlinewidth=70,
):
    """generates lines of a textual representation for a dag event stream

    events should generate what parsedag() does, so:

    ('n', (id, [parentids])) for node creation
    ('l', (id, labelname)) for labels on nodes
    ('a', text) for annotations
    ('c', text) for commands
    ('C', text) for line commands ('!!')

    >>> dagtext([])
    ''

    Combining parsedag and dagtext:

    >>> dagtext(parsedag(b'+1 :f +1 :p2 *f */p2'))
    '+1 :f +1 :p2 *f */p2'
    """
    return b"\n".join(
        dagtextlines(
            dag,
            addspaces,
            wraplabels,
            wrapannotations,
            wrapcommands,
            wrapnonlinear,
            usedots,
            maxlinewidth,
        )
    )
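The doctest above already demonstrates the parsedag/dagtext round trip; for completeness, the same check as a standalone sketch (assuming a Mercurial checkout is importable on the path):

from mercurial import dagparser

# parsedag() turns the compact DAG description into an event stream and
# dagtext() renders it back; the round trip should be lossless.
events = list(dagparser.parsedag(b'+1 :f +1 :p2 *f */p2'))
assert dagparser.dagtext(events) == b'+1 :f +1 :p2 *f */p2'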
b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common)) | b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common)) | ||||
) | ) | ||||
_chunksize = 4 << 10 | _chunksize = 4 << 10 | ||||
@command( | @command( | ||||
b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True | b'debugdownload', | ||||
[ | |||||
(b'o', b'output', b'', _(b'path')), | |||||
], | |||||
optionalrepo=True, | |||||
) | ) | ||||
def debugdownload(ui, repo, url, output=None, **opts): | def debugdownload(ui, repo, url, output=None, **opts): | ||||
"""download a resource using Mercurial logic and config | """download a resource using Mercurial logic and config""" | ||||
""" | |||||
fh = urlmod.open(ui, url, output) | fh = urlmod.open(ui, url, output) | ||||
dest = ui | dest = ui | ||||
if output: | if output: | ||||
dest = open(output, b"wb", _chunksize) | dest = open(output, b"wb", _chunksize) | ||||
try: | try: | ||||
data = fh.read(_chunksize) | data = fh.read(_chunksize) | ||||
while data: | while data: | ||||
if not util.safehasattr(index, b'stats'): | if not util.safehasattr(index, b'stats'): | ||||
raise error.Abort(_(b'debugindexstats only works with native code')) | raise error.Abort(_(b'debugindexstats only works with native code')) | ||||
for k, v in sorted(index.stats().items()): | for k, v in sorted(index.stats().items()): | ||||
ui.write(b'%s: %d\n' % (k, v)) | ui.write(b'%s: %d\n' % (k, v)) | ||||
@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True) | @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True) | ||||
def debuginstall(ui, **opts): | def debuginstall(ui, **opts): | ||||
'''test Mercurial installation | """test Mercurial installation | ||||
Returns 0 on success. | Returns 0 on success. | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
problems = 0 | problems = 0 | ||||
fm = ui.formatter(b'debuginstall', opts) | fm = ui.formatter(b'debuginstall', opts) | ||||
fm.startitem() | fm.startitem() | ||||
# encoding might be unknown or wrong. don't translate these messages. | # encoding might be unknown or wrong. don't translate these messages. | ||||
b'', | b'', | ||||
b'metadata', | b'metadata', | ||||
False, | False, | ||||
_(b'display the on disk meta data for the nodemap'), | _(b'display the on disk meta data for the nodemap'), | ||||
), | ), | ||||
], | ], | ||||
) | ) | ||||
def debugnodemap(ui, repo, **opts): | def debugnodemap(ui, repo, **opts): | ||||
"""write and inspect on disk nodemap | """write and inspect on disk nodemap""" | ||||
""" | |||||
if opts['dump_new']: | if opts['dump_new']: | ||||
unfi = repo.unfiltered() | unfi = repo.unfiltered() | ||||
cl = unfi.changelog | cl = unfi.changelog | ||||
if util.safehasattr(cl.index, "nodemap_data_all"): | if util.safehasattr(cl.index, "nodemap_data_all"): | ||||
data = cl.index.nodemap_data_all() | data = cl.index.nodemap_data_all() | ||||
else: | else: | ||||
data = nodemap.persistent_data(cl.index) | data = nodemap.persistent_data(cl.index) | ||||
ui.write(data) | ui.write(data) | ||||
(b'f', b'full', None, _(b'complete an entire path')), | (b'f', b'full', None, _(b'complete an entire path')), | ||||
(b'n', b'normal', None, _(b'show only normal files')), | (b'n', b'normal', None, _(b'show only normal files')), | ||||
(b'a', b'added', None, _(b'show only added files')), | (b'a', b'added', None, _(b'show only added files')), | ||||
(b'r', b'removed', None, _(b'show only removed files')), | (b'r', b'removed', None, _(b'show only removed files')), | ||||
], | ], | ||||
_(b'FILESPEC...'), | _(b'FILESPEC...'), | ||||
) | ) | ||||
def debugpathcomplete(ui, repo, *specs, **opts): | def debugpathcomplete(ui, repo, *specs, **opts): | ||||
'''complete part or all of a tracked path | """complete part or all of a tracked path | ||||
This command supports shells that offer path name completion. It | This command supports shells that offer path name completion. It | ||||
currently completes only files already known to the dirstate. | currently completes only files already known to the dirstate. | ||||
Completion extends only to the next path segment unless | Completion extends only to the next path segment unless | ||||
--full is specified, in which case entire paths are used.''' | --full is specified, in which case entire paths are used.""" | ||||
def complete(path, acceptable): | def complete(path, acceptable): | ||||
dirstate = repo.dirstate | dirstate = repo.dirstate | ||||
spec = os.path.normpath(os.path.join(encoding.getcwd(), path)) | spec = os.path.normpath(os.path.join(encoding.getcwd(), path)) | ||||
rootdir = repo.root + pycompat.ossep | rootdir = repo.root + pycompat.ossep | ||||
if spec != repo.root and not spec.startswith(rootdir): | if spec != repo.root and not spec.startswith(rootdir): | ||||
return [], [] | return [], [] | ||||
if os.path.isdir(spec): | if os.path.isdir(spec): | ||||
finally: | finally: | ||||
if not ui.debugflag: | if not ui.debugflag: | ||||
ui.popbuffer() | ui.popbuffer() | ||||
ui.write(b'%s = %s\n' % (path, tool)) | ui.write(b'%s = %s\n' % (path, tool)) | ||||
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True) | @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True) | ||||
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): | def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): | ||||
'''access the pushkey key/value protocol | """access the pushkey key/value protocol | ||||
With two args, list the keys in the given namespace. | With two args, list the keys in the given namespace. | ||||
With five args, set a key to new if it currently is set to old. | With five args, set a key to new if it currently is set to old. | ||||
Reports success or failure. | Reports success or failure. | ||||
''' | """ | ||||
target = hg.peer(ui, {}, repopath) | target = hg.peer(ui, {}, repopath) | ||||
if keyinfo: | if keyinfo: | ||||
key, old, new = keyinfo | key, old, new = keyinfo | ||||
with target.commandexecutor() as e: | with target.commandexecutor() as e: | ||||
r = e.callcommand( | r = e.callcommand( | ||||
b'pushkey', | b'pushkey', | ||||
{ | { | ||||
for key, value in sidedata: | for key, value in sidedata: | ||||
ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value))) | ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value))) | ||||
if ui.verbose: | if ui.verbose: | ||||
ui.writenoi18n(b' %s\n' % stringutil.pprint(value)) | ui.writenoi18n(b' %s\n' % stringutil.pprint(value)) | ||||
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True) | @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True) | ||||
def debugssl(ui, repo, source=None, **opts): | def debugssl(ui, repo, source=None, **opts): | ||||
'''test a secure connection to a server | """test a secure connection to a server | ||||
This builds the certificate chain for the server on Windows, installing the | This builds the certificate chain for the server on Windows, installing the | ||||
missing intermediates and trusted root via Windows Update if necessary. It | missing intermediates and trusted root via Windows Update if necessary. It | ||||
does nothing on other platforms. | does nothing on other platforms. | ||||
If SOURCE is omitted, the 'default' path will be used. If a URL is given, | If SOURCE is omitted, the 'default' path will be used. If a URL is given, | ||||
that server is used. See :hg:`help urls` for more information. | that server is used. See :hg:`help urls` for more information. | ||||
If the update succeeds, retry the original operation. Otherwise, the cause | If the update succeeds, retry the original operation. Otherwise, the cause | ||||
of the SSL error is likely another issue. | of the SSL error is likely another issue. | ||||
''' | """ | ||||
if not pycompat.iswindows: | if not pycompat.iswindows: | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b'certificate chain building is only possible on Windows') | _(b'certificate chain building is only possible on Windows') | ||||
) | ) | ||||
if not source: | if not source: | ||||
if not repo: | if not repo: | ||||
raise error.Abort( | raise error.Abort( | ||||
ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs))) | ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs))) | ||||
for r in revs: | for r in revs: | ||||
displayer.show(repo[r], **pycompat.strkwargs(props)) | displayer.show(repo[r], **pycompat.strkwargs(props)) | ||||
displayer.close() | displayer.close() | ||||
@command( | @command( | ||||
b'debuguigetpass', | b'debuguigetpass', | ||||
[(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),], | [ | ||||
(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')), | |||||
], | |||||
_(b'[-p TEXT]'), | _(b'[-p TEXT]'), | ||||
norepo=True, | norepo=True, | ||||
) | ) | ||||
def debuguigetpass(ui, prompt=b''): | def debuguigetpass(ui, prompt=b''): | ||||
"""show prompt to type password""" | """show prompt to type password""" | ||||
r = ui.getpass(prompt) | r = ui.getpass(prompt) | ||||
if r is not None: | if r is not None: | ||||
r = encoding.strtolocal(r) | r = encoding.strtolocal(r) | ||||
else: | else: | ||||
r = b"<default response>" | r = b"<default response>" | ||||
ui.writenoi18n(b'response: %s\n' % r) | ui.writenoi18n(b'response: %s\n' % r) | ||||
@command( | @command( | ||||
b'debuguiprompt', | b'debuguiprompt', | ||||
[(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),], | [ | ||||
(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')), | |||||
], | |||||
_(b'[-p TEXT]'), | _(b'[-p TEXT]'), | ||||
norepo=True, | norepo=True, | ||||
) | ) | ||||
def debuguiprompt(ui, prompt=b''): | def debuguiprompt(ui, prompt=b''): | ||||
"""show plain prompt""" | """show plain prompt""" | ||||
r = ui.prompt(prompt) | r = ui.prompt(prompt) | ||||
ui.writenoi18n(b'response: %s\n' % r) | ui.writenoi18n(b'response: %s\n' % r) | ||||
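
Most of the churn in these @command hunks comes from black's "magic trailing comma" behavior in the release this series adopts: a trailing comma inside brackets forces one element per line, while without it the collection collapses back onto one line if it fits. A standalone illustration (plain strings, no Mercurial imports):

# with a trailing comma, black keeps the exploded form:
opts = [
    ('p', 'prompt', '', 'prompt text', 'TEXT'),
]

# without it, the same list is collapsed to a single line:
opts = [('p', 'prompt', '', 'prompt text', 'TEXT')]
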
} | } | ||||
# Turn pipes/sockets into observers so we can log I/O. | # Turn pipes/sockets into observers so we can log I/O. | ||||
if ui.verbose: | if ui.verbose: | ||||
openerargs.update( | openerargs.update( | ||||
{ | { | ||||
'loggingfh': ui, | 'loggingfh': ui, | ||||
'loggingname': b's', | 'loggingname': b's', | ||||
'loggingopts': {'logdata': True, 'logdataapis': False,}, | 'loggingopts': { | ||||
'logdata': True, | |||||
'logdataapis': False, | |||||
}, | |||||
} | } | ||||
) | ) | ||||
if ui.debugflag: | if ui.debugflag: | ||||
openerargs['loggingopts']['logdataapis'] = True | openerargs['loggingopts']['logdataapis'] = True | ||||
# Don't send default headers when in raw mode. This allows us to | # Don't send default headers when in raw mode. This allows us to | ||||
# bypass most of the behavior of our URL handling code so we can | # bypass most of the behavior of our URL handling code so we can |
opts=None, | opts=None, | ||||
untrusted=False, | untrusted=False, | ||||
section=b'diff', | section=b'diff', | ||||
git=False, | git=False, | ||||
whitespace=False, | whitespace=False, | ||||
formatchanging=False, | formatchanging=False, | ||||
configprefix=b'', | configprefix=b'', | ||||
): | ): | ||||
'''return diffopts with only opted-in features parsed | """return diffopts with only opted-in features parsed | ||||
Features: | Features: | ||||
- git: git-style diffs | - git: git-style diffs | ||||
- whitespace: whitespace options like ignoreblanklines and ignorews | - whitespace: whitespace options like ignoreblanklines and ignorews | ||||
- formatchanging: options that will likely break or cause correctness issues | - formatchanging: options that will likely break or cause correctness issues | ||||
with most diff parsers | with most diff parsers | ||||
''' | """ | ||||
def get(key, name=None, getter=ui.configbool, forceplain=None): | def get(key, name=None, getter=ui.configbool, forceplain=None): | ||||
if opts: | if opts: | ||||
v = opts.get(key) | v = opts.get(key) | ||||
# diffopts flags are either None-default (which is passed | # diffopts flags are either None-default (which is passed | ||||
# through unchanged, so we can identify unset values), or | # through unchanged, so we can identify unset values), or | ||||
# some other falsey default (eg --unified, which defaults | # some other falsey default (eg --unified, which defaults | ||||
# to an empty string). We only want to override the config | # to an empty string). We only want to override the config |
finally: | finally: | ||||
os.close(tmpfd) | os.close(tmpfd) | ||||
vfs.unlink(tmpname) | vfs.unlink(tmpname) | ||||
@interfaceutil.implementer(intdirstate.idirstate) | @interfaceutil.implementer(intdirstate.idirstate) | ||||
class dirstate(object): | class dirstate(object): | ||||
def __init__(self, opener, ui, root, validate, sparsematchfn): | def __init__(self, opener, ui, root, validate, sparsematchfn): | ||||
'''Create a new dirstate object. | """Create a new dirstate object. | ||||
opener is an open()-like callable that can be used to open the | opener is an open()-like callable that can be used to open the | ||||
dirstate file; root is the root of the directory tracked by | dirstate file; root is the root of the directory tracked by | ||||
the dirstate. | the dirstate. | ||||
''' | """ | ||||
self._opener = opener | self._opener = opener | ||||
self._validate = validate | self._validate = validate | ||||
self._root = root | self._root = root | ||||
self._sparsematchfn = sparsematchfn | self._sparsematchfn = sparsematchfn | ||||
# ntpath.join(root, '') of Python 2.7.9 does not add sep if root is | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is | ||||
# UNC path pointing to root share (issue4557) | # UNC path pointing to root share (issue4557) | ||||
self._rootdir = pathutil.normasprefix(root) | self._rootdir = pathutil.normasprefix(root) | ||||
self._dirty = False | self._dirty = False | ||||
"""make sure the parents are loaded | """make sure the parents are loaded | ||||
Used to avoid a race condition. | Used to avoid a race condition. | ||||
""" | """ | ||||
self._pl | self._pl | ||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def parentchange(self): | def parentchange(self): | ||||
'''Context manager for handling dirstate parents. | """Context manager for handling dirstate parents. | ||||
If an exception occurs in the scope of the context manager, | If an exception occurs in the scope of the context manager, | ||||
the incoherent dirstate won't be written when wlock is | the incoherent dirstate won't be written when wlock is | ||||
released. | released. | ||||
''' | """ | ||||
self._parentwriters += 1 | self._parentwriters += 1 | ||||
yield | yield | ||||
# Typically we want the "undo" step of a context manager in a | # Typically we want the "undo" step of a context manager in a | ||||
# finally block so it happens even when an exception | # finally block so it happens even when an exception | ||||
# occurs. In this case, however, we only want to decrement | # occurs. In this case, however, we only want to decrement | ||||
# parentwriters if the code in the with statement exits | # parentwriters if the code in the with statement exits | ||||
# normally, so we don't have a try/finally here on purpose. | # normally, so we don't have a try/finally here on purpose. | ||||
self._parentwriters -= 1 | self._parentwriters -= 1 | ||||
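
The deliberate lack of try/finally above is easy to misread; here is a self-contained toy showing the intended effect (an exception leaves the counter raised, flagging the dirstate as mid-change):

import contextlib

class ToyDirstate:
    def __init__(self):
        self._parentwriters = 0

    @contextlib.contextmanager
    def parentchange(self):
        self._parentwriters += 1
        yield
        # intentionally not in a finally block: see the comment above
        self._parentwriters -= 1

    def pendingparentchange(self):
        return self._parentwriters > 0

ds = ToyDirstate()
try:
    with ds.parentchange():
        raise RuntimeError('simulated failure')
except RuntimeError:
    pass
assert ds.pendingparentchange()   # still flagged; dirstate won't be written
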
def pendingparentchange(self): | def pendingparentchange(self): | ||||
'''Returns true if the dirstate is in the middle of a set of changes | """Returns true if the dirstate is in the middle of a set of changes | ||||
that modify the dirstate parent. | that modify the dirstate parent. | ||||
''' | """ | ||||
return self._parentwriters > 0 | return self._parentwriters > 0 | ||||
@propertycache | @propertycache | ||||
def _map(self): | def _map(self): | ||||
"""Return the dirstate contents (see documentation for dirstatemap).""" | """Return the dirstate contents (see documentation for dirstatemap).""" | ||||
self._map = self._mapcls(self._ui, self._opener, self._root) | self._map = self._mapcls(self._ui, self._opener, self._root) | ||||
return self._map | return self._map | ||||
def _cwd(self): | def _cwd(self): | ||||
# internal config: ui.forcecwd | # internal config: ui.forcecwd | ||||
forcecwd = self._ui.config(b'ui', b'forcecwd') | forcecwd = self._ui.config(b'ui', b'forcecwd') | ||||
if forcecwd: | if forcecwd: | ||||
return forcecwd | return forcecwd | ||||
return encoding.getcwd() | return encoding.getcwd() | ||||
def getcwd(self): | def getcwd(self): | ||||
'''Return the path from which a canonical path is calculated. | """Return the path from which a canonical path is calculated. | ||||
This path should be used to resolve file patterns or to convert | This path should be used to resolve file patterns or to convert | ||||
canonical paths back to file paths for display. It shouldn't be | canonical paths back to file paths for display. It shouldn't be | ||||
used to get real file paths. Use vfs functions instead. | used to get real file paths. Use vfs functions instead. | ||||
''' | """ | ||||
cwd = self._cwd | cwd = self._cwd | ||||
if cwd == self._root: | if cwd == self._root: | ||||
return b'' | return b'' | ||||
# self._root ends with a path separator if self._root is '/' or 'C:\' | # self._root ends with a path separator if self._root is '/' or 'C:\' | ||||
rootsep = self._root | rootsep = self._root | ||||
if not util.endswithsep(rootsep): | if not util.endswithsep(rootsep): | ||||
rootsep += pycompat.ossep | rootsep += pycompat.ossep | ||||
if cwd.startswith(rootsep): | if cwd.startswith(rootsep): | ||||
return cwd[len(rootsep) :] | return cwd[len(rootsep) :] | ||||
else: | else: | ||||
# we're outside the repo. return an absolute path. | # we're outside the repo. return an absolute path. | ||||
return cwd | return cwd | ||||
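
A plain-string walkthrough of the root-stripping logic above (POSIX separators assumed for the example; the real code uses pycompat.ossep and util.endswithsep):

root = '/repo'
rootsep = root if root.endswith('/') else root + '/'

for cwd in ('/repo', '/repo/src/module', '/elsewhere'):
    if cwd == root:
        print(repr(''))                   # at the root: empty relative path
    elif cwd.startswith(rootsep):
        print(repr(cwd[len(rootsep):]))   # inside: 'src/module'
    else:
        print(repr(cwd))                  # outside the repo: absolute path
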
def pathto(self, f, cwd=None): | def pathto(self, f, cwd=None): | ||||
if cwd is None: | if cwd is None: | ||||
cwd = self.getcwd() | cwd = self.getcwd() | ||||
path = util.pathto(self._root, cwd, f) | path = util.pathto(self._root, cwd, f) | ||||
if self._slash: | if self._slash: | ||||
return util.pconvert(path) | return util.pconvert(path) | ||||
return path | return path | ||||
def __getitem__(self, key): | def __getitem__(self, key): | ||||
'''Return the current state of key (a filename) in the dirstate. | """Return the current state of key (a filename) in the dirstate. | ||||
States are: | States are: | ||||
n normal | n normal | ||||
m needs merging | m needs merging | ||||
r marked for removal | r marked for removal | ||||
a marked for addition | a marked for addition | ||||
? not tracked | ? not tracked | ||||
''' | """ | ||||
return self._map.get(key, (b"?",))[0] | return self._map.get(key, (b"?",))[0] | ||||
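
A toy lookup mirroring __getitem__, with a plain dict standing in for the dirstate map; entries are assumed to be (state, mode, size, mtime) tuples and the filenames are made up:

dmap = {
    b'tracked.py': (b'n', 0o644, 120, 1600000000),
    b'added.py': (b'a', 0, -1, -1),
}
assert dmap.get(b'added.py', (b'?',))[0] == b'a'     # marked for addition
assert dmap.get(b'unknown.py', (b'?',))[0] == b'?'   # not tracked
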
def __contains__(self, key): | def __contains__(self, key): | ||||
return key in self._map | return key in self._map | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(sorted(self._map)) | return iter(sorted(self._map)) | ||||
ce = self._filecache[b'_branch'] | ce = self._filecache[b'_branch'] | ||||
if ce: | if ce: | ||||
ce.refresh() | ce.refresh() | ||||
except: # re-raises | except: # re-raises | ||||
f.discard() | f.discard() | ||||
raise | raise | ||||
def invalidate(self): | def invalidate(self): | ||||
'''Causes the next access to reread the dirstate. | """Causes the next access to reread the dirstate. | ||||
This is different from localrepo.invalidatedirstate() because it always | This is different from localrepo.invalidatedirstate() because it always | ||||
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to | ||||
check whether the dirstate has changed before rereading it.''' | check whether the dirstate has changed before rereading it.""" | ||||
for a in ("_map", "_branch", "_ignore"): | for a in ("_map", "_branch", "_ignore"): | ||||
if a in self.__dict__: | if a in self.__dict__: | ||||
delattr(self, a) | delattr(self, a) | ||||
self._lastnormaltime = 0 | self._lastnormaltime = 0 | ||||
self._dirty = False | self._dirty = False | ||||
self._updatedfiles.clear() | self._updatedfiles.clear() | ||||
self._parentwriters = 0 | self._parentwriters = 0 | ||||
_(b'file %r in dirstate clashes with %r') | _(b'file %r in dirstate clashes with %r') | ||||
% (pycompat.bytestr(d), pycompat.bytestr(f)) | % (pycompat.bytestr(d), pycompat.bytestr(f)) | ||||
) | ) | ||||
self._dirty = True | self._dirty = True | ||||
self._updatedfiles.add(f) | self._updatedfiles.add(f) | ||||
self._map.addfile(f, oldstate, state, mode, size, mtime) | self._map.addfile(f, oldstate, state, mode, size, mtime) | ||||
def normal(self, f, parentfiledata=None): | def normal(self, f, parentfiledata=None): | ||||
'''Mark a file normal and clean. | """Mark a file normal and clean. | ||||
parentfiledata: (mode, size, mtime) of the clean file | parentfiledata: (mode, size, mtime) of the clean file | ||||
parentfiledata should be computed from memory (for mode, | parentfiledata should be computed from memory (for mode, | ||||
size), at or as close as possible to the point where we | size), at or as close as possible to the point where we | ||||
determined the file was clean, to limit the risk of the | determined the file was clean, to limit the risk of the | ||||
file having been changed by an external process between the | file having been changed by an external process between the | ||||
moment where the file was determined to be clean and now.''' | moment where the file was determined to be clean and now.""" | ||||
if parentfiledata: | if parentfiledata: | ||||
(mode, size, mtime) = parentfiledata | (mode, size, mtime) = parentfiledata | ||||
else: | else: | ||||
s = os.lstat(self._join(f)) | s = os.lstat(self._join(f)) | ||||
mode = s.st_mode | mode = s.st_mode | ||||
size = s.st_size | size = s.st_size | ||||
mtime = s[stat.ST_MTIME] | mtime = s[stat.ST_MTIME] | ||||
self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask) | self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask) | ||||
# store discovered result in dirfoldmap so that future | # store discovered result in dirfoldmap so that future | ||||
# normalizefile calls don't start matching directories | # normalizefile calls don't start matching directories | ||||
folded = self._discoverpath( | folded = self._discoverpath( | ||||
path, normed, ignoremissing, exists, self._map.dirfoldmap | path, normed, ignoremissing, exists, self._map.dirfoldmap | ||||
) | ) | ||||
return folded | return folded | ||||
def normalize(self, path, isknown=False, ignoremissing=False): | def normalize(self, path, isknown=False, ignoremissing=False): | ||||
''' | """ | ||||
normalize the case of a pathname when on a casefolding filesystem | normalize the case of a pathname when on a casefolding filesystem | ||||
isknown specifies whether the filename came from walking the | isknown specifies whether the filename came from walking the | ||||
disk, to avoid extra filesystem access. | disk, to avoid extra filesystem access. | ||||
If ignoremissing is True, missing paths are returned | If ignoremissing is True, missing paths are returned | ||||
unchanged. Otherwise, we try harder to normalize possibly | unchanged. Otherwise, we try harder to normalize possibly | ||||
existing path components. | existing path components. | ||||
The normalized case is determined based on the following precedence: | The normalized case is determined based on the following precedence: | ||||
- version of name already stored in the dirstate | - version of name already stored in the dirstate | ||||
- version of name stored on disk | - version of name stored on disk | ||||
- version provided via command arguments | - version provided via command arguments | ||||
''' | """ | ||||
if self._checkcase: | if self._checkcase: | ||||
return self._normalize(path, isknown, ignoremissing) | return self._normalize(path, isknown, ignoremissing) | ||||
return path | return path | ||||
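
The stdlib shows the same idea on a smaller scale: os.path.normcase picks one canonical spelling per platform (lowercasing on Windows, identity on POSIX), which is why the dirstate only normalizes when _checkcase says the filesystem folds case:

import os

spelling = 'ReadMe.TXT'
print(os.path.normcase(spelling))   # 'readme.txt' on Windows, unchanged on POSIX
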
def clear(self): | def clear(self): | ||||
self._map.clear() | self._map.clear() | ||||
self._lastnormaltime = 0 | self._lastnormaltime = 0 | ||||
for f in to_lookup: | for f in to_lookup: | ||||
self.normallookup(f) | self.normallookup(f) | ||||
for f in to_drop: | for f in to_drop: | ||||
self.drop(f) | self.drop(f) | ||||
self._dirty = True | self._dirty = True | ||||
def identity(self): | def identity(self): | ||||
'''Return identity of dirstate itself to detect changing in storage | """Return identity of dirstate itself to detect changing in storage | ||||
If the identity of the previous dirstate equals this one, writing | If the identity of the previous dirstate equals this one, writing | ||||
out changes based on the former dirstate preserves consistency. | out changes based on the former dirstate preserves consistency. | ||||
''' | """ | ||||
return self._map.identity | return self._map.identity | ||||
def write(self, tr): | def write(self, tr): | ||||
if not self._dirty: | if not self._dirty: | ||||
return | return | ||||
filename = self._filename | filename = self._filename | ||||
if tr: | if tr: | ||||
self._root, b'', [], [pattern], warn=self._ui.warn | self._root, b'', [], [pattern], warn=self._ui.warn | ||||
) | ) | ||||
if m(f): | if m(f): | ||||
return (i, lineno, line) | return (i, lineno, line) | ||||
visited.add(i) | visited.add(i) | ||||
return (None, -1, b"") | return (None, -1, b"") | ||||
def _walkexplicit(self, match, subrepos): | def _walkexplicit(self, match, subrepos): | ||||
'''Get stat data about the files explicitly specified by match. | """Get stat data about the files explicitly specified by match. | ||||
Return a triple (results, dirsfound, dirsnotfound). | Return a triple (results, dirsfound, dirsnotfound). | ||||
- results is a mapping from filename to stat result. It also contains | - results is a mapping from filename to stat result. It also contains | ||||
listings mapping subrepos and .hg to None. | listings mapping subrepos and .hg to None. | ||||
- dirsfound is a list of files found to be directories. | - dirsfound is a list of files found to be directories. | ||||
- dirsnotfound is a list of files that the dirstate thinks are | - dirsnotfound is a list of files that the dirstate thinks are | ||||
directories and that were not found.''' | directories and that were not found.""" | ||||
def badtype(mode): | def badtype(mode): | ||||
kind = _(b'unknown') | kind = _(b'unknown') | ||||
if stat.S_ISCHR(mode): | if stat.S_ISCHR(mode): | ||||
kind = _(b'character device') | kind = _(b'character device') | ||||
elif stat.S_ISBLK(mode): | elif stat.S_ISBLK(mode): | ||||
kind = _(b'block device') | kind = _(b'block device') | ||||
elif stat.S_ISFIFO(mode): | elif stat.S_ISFIFO(mode): | ||||
path, norm, True, None, self._map.dirfoldmap | path, norm, True, None, self._map.dirfoldmap | ||||
) | ) | ||||
if path != folded: | if path != folded: | ||||
results[path] = None | results[path] = None | ||||
return results, dirsfound, dirsnotfound | return results, dirsfound, dirsnotfound | ||||
def walk(self, match, subrepos, unknown, ignored, full=True): | def walk(self, match, subrepos, unknown, ignored, full=True): | ||||
''' | """ | ||||
Walk recursively through the directory tree, finding all files | Walk recursively through the directory tree, finding all files | ||||
matched by match. | matched by match. | ||||
If full is False, maybe skip some known-clean files. | If full is False, maybe skip some known-clean files. | ||||
Return a dict mapping filename to stat-like object (either | Return a dict mapping filename to stat-like object (either | ||||
mercurial.osutil.stat instance or return value of os.stat()). | mercurial.osutil.stat instance or return value of os.stat()). | ||||
''' | """ | ||||
# full is a flag that extensions that hook into walk can use -- this | # full is a flag that extensions that hook into walk can use -- this | ||||
# implementation doesn't use it at all. This satisfies the contract | # implementation doesn't use it at all. This satisfies the contract | ||||
# because we only guarantee a "maybe". | # because we only guarantee a "maybe". | ||||
if ignored: | if ignored: | ||||
ignore = util.never | ignore = util.never | ||||
dirignore = util.never | dirignore = util.never | ||||
elif unknown: | elif unknown: | ||||
deleted=deleted, | deleted=deleted, | ||||
unknown=unknown, | unknown=unknown, | ||||
ignored=ignored, | ignored=ignored, | ||||
clean=clean, | clean=clean, | ||||
) | ) | ||||
return (lookup, status) | return (lookup, status) | ||||
def status(self, match, subrepos, ignored, clean, unknown): | def status(self, match, subrepos, ignored, clean, unknown): | ||||
'''Determine the status of the working copy relative to the | """Determine the status of the working copy relative to the | ||||
dirstate and return a pair of (unsure, status), where status is of type | dirstate and return a pair of (unsure, status), where status is of type | ||||
scmutil.status and: | scmutil.status and: | ||||
unsure: | unsure: | ||||
files that might have been modified since the dirstate was | files that might have been modified since the dirstate was | ||||
written, but need to be read to be sure (size is the same | written, but need to be read to be sure (size is the same | ||||
but mtime differs) | but mtime differs) | ||||
status.modified: | status.modified: | ||||
files that have definitely been modified since the dirstate | files that have definitely been modified since the dirstate | ||||
was written (different size or mode) | was written (different size or mode) | ||||
status.clean: | status.clean: | ||||
files that have definitely not been modified since the | files that have definitely not been modified since the | ||||
dirstate was written | dirstate was written | ||||
''' | """ | ||||
listignored, listclean, listunknown = ignored, clean, unknown | listignored, listclean, listunknown = ignored, clean, unknown | ||||
lookup, modified, added, unknown, ignored = [], [], [], [], [] | lookup, modified, added, unknown, ignored = [], [], [], [], [] | ||||
removed, deleted, clean = [], [], [] | removed, deleted, clean = [], [], [] | ||||
dmap = self._map | dmap = self._map | ||||
dmap.preload() | dmap.preload() | ||||
use_rust = True | use_rust = True | ||||
elif state == b'r': | elif state == b'r': | ||||
radd(fn) | radd(fn) | ||||
status = scmutil.status( | status = scmutil.status( | ||||
modified, added, removed, deleted, unknown, ignored, clean | modified, added, removed, deleted, unknown, ignored, clean | ||||
) | ) | ||||
return (lookup, status) | return (lookup, status) | ||||
def matches(self, match): | def matches(self, match): | ||||
''' | """ | ||||
return files in the dirstate (in whatever state) filtered by match | return files in the dirstate (in whatever state) filtered by match | ||||
''' | """ | ||||
dmap = self._map | dmap = self._map | ||||
if rustmod is not None: | if rustmod is not None: | ||||
dmap = self._map._rustmap | dmap = self._map._rustmap | ||||
if match.always(): | if match.always(): | ||||
return dmap.keys() | return dmap.keys() | ||||
files = match.files() | files = match.files() | ||||
if match.isexact(): | if match.isexact(): |
from . import ( | from . import ( | ||||
error, | error, | ||||
narrowspec, | narrowspec, | ||||
util, | util, | ||||
) | ) | ||||
class dirstateguard(util.transactional): | class dirstateguard(util.transactional): | ||||
'''Restore dirstate at unexpected failure. | """Restore dirstate at unexpected failure. | ||||
At the construction, this class does: | At the construction, this class does: | ||||
- write current ``repo.dirstate`` out, and | - write current ``repo.dirstate`` out, and | ||||
- save ``.hg/dirstate`` into the backup file | - save ``.hg/dirstate`` into the backup file | ||||
This restores ``.hg/dirstate`` from backup file, if ``release()`` | This restores ``.hg/dirstate`` from backup file, if ``release()`` | ||||
is invoked before ``close()``. | is invoked before ``close()``. | ||||
This just removes the backup file at ``close()`` before ``release()``. | This just removes the backup file at ``close()`` before ``release()``. | ||||
''' | """ | ||||
def __init__(self, repo, name): | def __init__(self, repo, name): | ||||
self._repo = repo | self._repo = repo | ||||
self._active = False | self._active = False | ||||
self._closed = False | self._closed = False | ||||
self._backupname = b'dirstate.backup.%s.%d' % (name, id(self)) | self._backupname = b'dirstate.backup.%s.%d' % (name, id(self)) | ||||
self._narrowspecbackupname = b'narrowspec.backup.%s.%d' % ( | self._narrowspecbackupname = b'narrowspec.backup.%s.%d' % ( | ||||
name, | name, |
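
The backup/restore contract described in the docstring, reduced to a self-contained toy over a single file (the class and method names here are stand-ins, not Mercurial's):

import os
import shutil

class FileGuard:
    def __init__(self, path):
        self._path = path
        self._backup = path + '.backup'
        shutil.copy(path, self._backup)   # save a backup on construction
        self._closed = False

    def close(self):
        os.unlink(self._backup)           # success: just drop the backup
        self._closed = True

    def release(self):
        if not self._closed:              # failure: restore from the backup
            shutil.copy(self._backup, self._path)
            os.unlink(self._backup)
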
if heads and not anyinc: | if heads and not anyinc: | ||||
# server could be lying on the advertised heads | # server could be lying on the advertised heads | ||||
has_node = repo.changelog.hasnode | has_node = repo.changelog.hasnode | ||||
anyinc = any(not has_node(n) for n in heads) | anyinc = any(not has_node(n) for n in heads) | ||||
return (list(common), anyinc, heads or list(srvheads)) | return (list(common), anyinc, heads or list(srvheads)) | ||||
class outgoing(object): | class outgoing(object): | ||||
'''Represents the result of a findcommonoutgoing() call. | """Represents the result of a findcommonoutgoing() call. | ||||
Members: | Members: | ||||
ancestorsof is a list of the nodes whose ancestors are included in the | ancestorsof is a list of the nodes whose ancestors are included in the | ||||
outgoing operation. | outgoing operation. | ||||
missing is a list of those ancestors of ancestorsof that are present in | missing is a list of those ancestors of ancestorsof that are present in | ||||
local but not in remote. | local but not in remote. | ||||
common is a set containing revs common between the local and the remote | common is a set containing revs common between the local and the remote | ||||
repository (at least all of those that are ancestors of ancestorsof). | repository (at least all of those that are ancestors of ancestorsof). | ||||
commonheads is the list of heads of common. | commonheads is the list of heads of common. | ||||
excluded is the list of missing changesets that shouldn't be sent | excluded is the list of missing changesets that shouldn't be sent | ||||
remotely. | remotely. | ||||
Some members are computed on demand from the heads, unless provided upfront | Some members are computed on demand from the heads, unless provided upfront | ||||
by discovery.''' | by discovery.""" | ||||
def __init__( | def __init__( | ||||
self, repo, commonheads=None, ancestorsof=None, missingroots=None | self, repo, commonheads=None, ancestorsof=None, missingroots=None | ||||
): | ): | ||||
# at least one of them must not be set | # at least one of them must not be set | ||||
assert None in (commonheads, missingroots) | assert None in (commonheads, missingroots) | ||||
cl = repo.changelog | cl = repo.changelog | ||||
if ancestorsof is None: | if ancestorsof is None: | ||||
stacklevel=2, | stacklevel=2, | ||||
) | ) | ||||
return self.ancestorsof | return self.ancestorsof | ||||
def findcommonoutgoing( | def findcommonoutgoing( | ||||
repo, other, onlyheads=None, force=False, commoninc=None, portable=False | repo, other, onlyheads=None, force=False, commoninc=None, portable=False | ||||
): | ): | ||||
'''Return an outgoing instance to identify the nodes present in repo but | """Return an outgoing instance to identify the nodes present in repo but | ||||
not in other. | not in other. | ||||
If onlyheads is given, only nodes ancestral to nodes in onlyheads | If onlyheads is given, only nodes ancestral to nodes in onlyheads | ||||
(inclusive) are included. If you already know the local repo's heads, | (inclusive) are included. If you already know the local repo's heads, | ||||
passing them in onlyheads is faster than letting them be recomputed here. | passing them in onlyheads is faster than letting them be recomputed here. | ||||
If commoninc is given, it must be the result of a prior call to | If commoninc is given, it must be the result of a prior call to | ||||
findcommonincoming(repo, other, force) to avoid recomputing it here. | findcommonincoming(repo, other, force) to avoid recomputing it here. | ||||
If portable is given, compute more conservative common and ancestorsof, | If portable is given, compute more conservative common and ancestorsof, | ||||
to make bundles created from the instance more portable.''' | to make bundles created from the instance more portable.""" | ||||
# declare an empty outgoing object to be filled later | # declare an empty outgoing object to be filled later | ||||
og = outgoing(repo, None, None) | og = outgoing(repo, None, None) | ||||
# get common set if not provided | # get common set if not provided | ||||
if commoninc is None: | if commoninc is None: | ||||
commoninc = findcommonincoming( | commoninc = findcommonincoming( | ||||
repo, other, force=force, ancestorsof=onlyheads | repo, other, force=force, ancestorsof=onlyheads | ||||
) | ) | ||||
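
A toy DAG illustrating the common/missing split the two docstrings above describe (letters stand in for changeset nodes; "ancestors" here is inclusive, as in the docstring):

parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['c']}

def ancestors(nodes):
    seen, stack = set(), list(nodes)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents[n])
    return seen

commonheads = ['b']   # the remote already has a and b
ancestorsof = ['d']   # we want everything up to and including d
missing = ancestors(ancestorsof) - ancestors(commonheads)
assert missing == {'c', 'd'}
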
def _nowarnheads(pushop): | def _nowarnheads(pushop): | ||||
# Compute newly pushed bookmarks. We don't warn about bookmarked heads. | # Compute newly pushed bookmarks. We don't warn about bookmarked heads. | ||||
repo = pushop.repo.unfiltered() | repo = pushop.repo.unfiltered() | ||||
remote = pushop.remote | remote = pushop.remote | ||||
localbookmarks = repo._bookmarks | localbookmarks = repo._bookmarks | ||||
with remote.commandexecutor() as e: | with remote.commandexecutor() as e: | ||||
remotebookmarks = e.callcommand( | remotebookmarks = e.callcommand( | ||||
b'listkeys', {b'namespace': b'bookmarks',} | b'listkeys', | ||||
{ | |||||
b'namespace': b'bookmarks', | |||||
}, | |||||
).result() | ).result() | ||||
bookmarkedheads = set() | bookmarkedheads = set() | ||||
# internal config: bookmarks.pushing | # internal config: bookmarks.pushing | ||||
newbookmarks = [ | newbookmarks = [ | ||||
localbookmarks.expandname(b) | localbookmarks.expandname(b) | ||||
for b in pushop.ui.configlist(b'bookmarks', b'pushing') | for b in pushop.ui.configlist(b'bookmarks', b'pushing') | ||||
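
The commandexecutor blocks in these hunks all follow the same shape: queue a command, get a future-like object back, call .result(). concurrent.futures mimics that shape closely (listkeys here is a canned stand-in, not the wire protocol):

from concurrent.futures import ThreadPoolExecutor

def listkeys(namespace):
    return {b'mybook': b'0123456789abcdef0123'}   # canned data

with ThreadPoolExecutor(max_workers=1) as e:
    future = e.submit(listkeys, b'bookmarks')
    remotebookmarks = future.result()
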
elif len(newhs) > len(oldhs): | elif len(newhs) > len(oldhs): | ||||
# remove bookmarked or existing remote heads from the new heads list | # remove bookmarked or existing remote heads from the new heads list | ||||
dhs = sorted(newhs - nowarnheads - oldhs) | dhs = sorted(newhs - nowarnheads - oldhs) | ||||
if dhs: | if dhs: | ||||
if errormsg is None: | if errormsg is None: | ||||
if branch not in (b'default', None): | if branch not in (b'default', None): | ||||
errormsg = _( | errormsg = _( | ||||
b"push creates new remote head %s on branch '%s'" | b"push creates new remote head %s on branch '%s'" | ||||
) % (short(dhs[0]), branch,) | ) % ( | ||||
short(dhs[0]), | |||||
branch, | |||||
) | |||||
elif repo[dhs[0]].bookmarks(): | elif repo[dhs[0]].bookmarks(): | ||||
errormsg = _( | errormsg = _( | ||||
b"push creates new remote head %s " | b"push creates new remote head %s " | ||||
b"with bookmark '%s'" | b"with bookmark '%s'" | ||||
) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0]) | ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0]) | ||||
else: | else: | ||||
errormsg = _(b"push creates new remote head %s") % short( | errormsg = _(b"push creates new remote head %s") % short( | ||||
dhs[0] | dhs[0] |
cmd = re.sub(br'\$(\d+|\$)', replacer, cmd) | cmd = re.sub(br'\$(\d+|\$)', replacer, cmd) | ||||
givenargs = [x for i, x in enumerate(givenargs) if i not in nums] | givenargs = [x for i, x in enumerate(givenargs) if i not in nums] | ||||
args = pycompat.shlexsplit(cmd) | args = pycompat.shlexsplit(cmd) | ||||
return args + givenargs | return args + givenargs | ||||
def aliasinterpolate(name, args, cmd): | def aliasinterpolate(name, args, cmd): | ||||
'''interpolate args into cmd for shell aliases | """interpolate args into cmd for shell aliases | ||||
This also handles $0, $@ and "$@". | This also handles $0, $@ and "$@". | ||||
''' | """ | ||||
# util.interpolate can't deal with "$@" (with quotes) because it's only | # util.interpolate can't deal with "$@" (with quotes) because it's only | ||||
# built to match prefix + patterns. | # built to match prefix + patterns. | ||||
replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)} | replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)} | ||||
replacemap[b'$0'] = name | replacemap[b'$0'] = name | ||||
replacemap[b'$$'] = b'$' | replacemap[b'$$'] = b'$' | ||||
replacemap[b'$@'] = b' '.join(args) | replacemap[b'$@'] = b' '.join(args) | ||||
# Typical Unix shells interpolate "$@" (with quotes) as all the positional | # Typical Unix shells interpolate "$@" (with quotes) as all the positional | ||||
# parameters, separated out into words. Emulate the same behavior here by | # parameters, separated out into words. Emulate the same behavior here by | ||||
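
A simplified sketch of the substitution aliasinterpolate performs; doing it in one regex pass keeps '$1' from clobbering a prefix of '$10' (the quoted "$@" handling mentioned in the comment above is elided, and the function name is hypothetical):

import re

def interpolate_alias(name, args, cmd):
    # build the replacement map described above
    repl = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)}
    repl[b'$0'] = name
    repl[b'$$'] = b'$'
    repl[b'$@'] = b' '.join(args)
    # one regex pass, longest keys first
    keys = sorted(repl, key=len, reverse=True)
    pattern = re.compile(b'|'.join(re.escape(k) for k in keys))
    return pattern.sub(lambda m: repl[m.group()], cmd)

assert interpolate_alias(b'la', [b'tip'], b'log -r $1 $$') == b'log -r tip $'
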
cmdhelp = None | cmdhelp = None | ||||
self.alias = True | self.alias = True | ||||
self._populatehelp(ui, name, cmd, self.fn, cmdhelp) | self._populatehelp(ui, name, cmd, self.fn, cmdhelp) | ||||
except error.UnknownCommand: | except error.UnknownCommand: | ||||
self.badalias = _( | self.badalias = _( | ||||
b"alias '%s' resolves to unknown command '%s'" | b"alias '%s' resolves to unknown command '%s'" | ||||
) % (self.name, cmd,) | ) % ( | ||||
self.name, | |||||
cmd, | |||||
) | |||||
self.unknowncmd = True | self.unknowncmd = True | ||||
except error.AmbiguousCommand: | except error.AmbiguousCommand: | ||||
self.badalias = _( | self.badalias = _( | ||||
b"alias '%s' resolves to ambiguous command '%s'" | b"alias '%s' resolves to ambiguous command '%s'" | ||||
) % (self.name, cmd,) | ) % ( | ||||
self.name, | |||||
cmd, | |||||
) | |||||
def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None): | def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None): | ||||
# confine strings to be passed to i18n.gettext() | # confine strings to be passed to i18n.gettext() | ||||
cfg = {} | cfg = {} | ||||
for k in (b'doc', b'help', b'category'): | for k in (b'doc', b'help', b'category'): | ||||
v = ui.config(b'alias', b'%s:%s' % (name, k), None) | v = ui.config(b'alias', b'%s:%s' % (name, k), None) | ||||
if v is None: | if v is None: | ||||
continue | continue |
encoding = _encodingrewrites.get(encoding, encoding) | encoding = _encodingrewrites.get(encoding, encoding) | ||||
except locale.Error: | except locale.Error: | ||||
encoding = b'ascii' | encoding = b'ascii' | ||||
encodingmode = environ.get(b"HGENCODINGMODE", b"strict") | encodingmode = environ.get(b"HGENCODINGMODE", b"strict") | ||||
fallbackencoding = b'ISO-8859-1' | fallbackencoding = b'ISO-8859-1' | ||||
class localstr(bytes): | class localstr(bytes): | ||||
'''This class allows strings that are unmodified to be | """This class allows strings that are unmodified to be | ||||
round-tripped to the local encoding and back''' | round-tripped to the local encoding and back""" | ||||
def __new__(cls, u, l): | def __new__(cls, u, l): | ||||
s = bytes.__new__(cls, l) | s = bytes.__new__(cls, l) | ||||
s._utf8 = u | s._utf8 = u | ||||
return s | return s | ||||
if pycompat.TYPE_CHECKING: | if pycompat.TYPE_CHECKING: | ||||
# pseudo implementation to help pytype see localstr() constructor | # pseudo implementation to help pytype see localstr() constructor | ||||
eaw = getattr(unicodedata, 'east_asian_width', None) | eaw = getattr(unicodedata, 'east_asian_width', None) | ||||
if eaw is not None: | if eaw is not None: | ||||
return sum([eaw(c) in _wide and 2 or 1 for c in d]) | return sum([eaw(c) in _wide and 2 or 1 for c in d]) | ||||
return len(d) | return len(d) | ||||
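
A standalone version of the width computation above: East Asian 'W'ide and 'F'ullwidth characters occupy two terminal columns. (The real code's _wide set can also, I believe, count 'A'mbiguous characters as wide depending on configuration; that is skipped here.)

import unicodedata

def colwidth(s):
    # 'W'ide and 'F'ullwidth characters take two columns, others one
    return sum(2 if unicodedata.east_asian_width(c) in 'WF' else 1
               for c in s)

assert colwidth('abc') == 3
assert colwidth('\u6f22\u5b57') == 4   # two CJK ideographs
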
def getcols(s, start, c): | def getcols(s, start, c): | ||||
# type: (bytes, int, int) -> bytes | # type: (bytes, int, int) -> bytes | ||||
'''Use colwidth to find a c-column substring of s starting at byte | """Use colwidth to find a c-column substring of s starting at byte | ||||
index start''' | index start""" | ||||
for x in pycompat.xrange(start + c, len(s)): | for x in pycompat.xrange(start + c, len(s)): | ||||
t = s[start:x] | t = s[start:x] | ||||
if colwidth(t) == c: | if colwidth(t) == c: | ||||
return t | return t | ||||
raise ValueError('substring not found') | raise ValueError('substring not found') | ||||
def trim(s, width, ellipsis=b'', leftside=False): | def trim(s, width, ellipsis=b'', leftside=False): | ||||
return uu.encode(_sysstr(encoding)) | return uu.encode(_sysstr(encoding)) | ||||
except UnicodeError: | except UnicodeError: | ||||
return s.upper() # we don't know how to fold this except in ASCII | return s.upper() # we don't know how to fold this except in ASCII | ||||
except LookupError as k: | except LookupError as k: | ||||
raise error.Abort(k, hint=b"please check your locale settings") | raise error.Abort(k, hint=b"please check your locale settings") | ||||
class normcasespecs(object): | class normcasespecs(object): | ||||
'''what a platform's normcase does to ASCII strings | """what a platform's normcase does to ASCII strings | ||||
This is specified per platform, and should be consistent with what normcase | This is specified per platform, and should be consistent with what normcase | ||||
on that platform actually does. | on that platform actually does. | ||||
lower: normcase lowercases ASCII strings | lower: normcase lowercases ASCII strings | ||||
upper: normcase uppercases ASCII strings | upper: normcase uppercases ASCII strings | ||||
other: the fallback function should always be called | other: the fallback function should always be called | ||||
This should be kept in sync with normcase_spec in util.h.''' | This should be kept in sync with normcase_spec in util.h.""" | ||||
lower = -1 | lower = -1 | ||||
upper = 1 | upper = 1 | ||||
other = 0 | other = 0 | ||||
def jsonescape(s, paranoid=False): | def jsonescape(s, paranoid=False): | ||||
# type: (Any, Any) -> Any | # type: (Any, Any) -> Any | ||||
'''returns a string suitable for JSON | """returns a string suitable for JSON | ||||
JSON is problematic for us because it doesn't support non-Unicode | JSON is problematic for us because it doesn't support non-Unicode | ||||
bytes. To deal with this, we take the following approach: | bytes. To deal with this, we take the following approach: | ||||
- localstr/safelocalstr objects are converted back to UTF-8 | - localstr/safelocalstr objects are converted back to UTF-8 | ||||
- valid UTF-8/ASCII strings are passed as-is | - valid UTF-8/ASCII strings are passed as-is | ||||
- other strings are converted to UTF-8b surrogate encoding | - other strings are converted to UTF-8b surrogate encoding | ||||
- apply JSON-specified string escaping | - apply JSON-specified string escaping | ||||
>>> jsonescape(b'a weird byte: \\xdd', paranoid=True) | >>> jsonescape(b'a weird byte: \\xdd', paranoid=True) | ||||
'a weird byte: \\\\udcdd' | 'a weird byte: \\\\udcdd' | ||||
>>> jsonescape(b'utf-8: caf\\xc3\\xa9', paranoid=True) | >>> jsonescape(b'utf-8: caf\\xc3\\xa9', paranoid=True) | ||||
'utf-8: caf\\\\u00e9' | 'utf-8: caf\\\\u00e9' | ||||
>>> jsonescape(b'non-BMP: \\xf0\\x9d\\x84\\x9e', paranoid=True) | >>> jsonescape(b'non-BMP: \\xf0\\x9d\\x84\\x9e', paranoid=True) | ||||
'non-BMP: \\\\ud834\\\\udd1e' | 'non-BMP: \\\\ud834\\\\udd1e' | ||||
>>> jsonescape(b'<foo@example.org>', paranoid=True) | >>> jsonescape(b'<foo@example.org>', paranoid=True) | ||||
'\\\\u003cfoo@example.org\\\\u003e' | '\\\\u003cfoo@example.org\\\\u003e' | ||||
''' | """ | ||||
u8chars = toutf8b(s) | u8chars = toutf8b(s) | ||||
try: | try: | ||||
return _jsonescapeu8fast(u8chars, paranoid) | return _jsonescapeu8fast(u8chars, paranoid) | ||||
except ValueError: | except ValueError: | ||||
pass | pass | ||||
return charencodepure.jsonescapeu8fallback(u8chars, paranoid) | return charencodepure.jsonescapeu8fallback(u8chars, paranoid) | ||||
# We need to decode/encode U+DCxx codes transparently since invalid UTF-8 | # We need to decode/encode U+DCxx codes transparently since invalid UTF-8 | ||||
# bytes are mapped to that range. | # bytes are mapped to that range. | ||||
if pycompat.ispy3: | if pycompat.ispy3: | ||||
_utf8strict = r'surrogatepass' | _utf8strict = r'surrogatepass' | ||||
else: | else: | ||||
_utf8strict = r'strict' | _utf8strict = r'strict' | ||||
_utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4] | _utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4] | ||||
def getutf8char(s, pos): | def getutf8char(s, pos): | ||||
# type: (bytes, int) -> bytes | # type: (bytes, int) -> bytes | ||||
'''get the next full utf-8 character in the given string, starting at pos | """get the next full utf-8 character in the given string, starting at pos | ||||
Raises a UnicodeError if the given location does not start a valid | Raises a UnicodeError if the given location does not start a valid | ||||
utf-8 character. | utf-8 character. | ||||
''' | """ | ||||
# find how many bytes to attempt decoding from first nibble | # find how many bytes to attempt decoding from first nibble | ||||
l = _utf8len[ord(s[pos : pos + 1]) >> 4] | l = _utf8len[ord(s[pos : pos + 1]) >> 4] | ||||
if not l: # ascii | if not l: # ascii | ||||
return s[pos : pos + 1] | return s[pos : pos + 1] | ||||
c = s[pos : pos + l] | c = s[pos : pos + l] | ||||
# validate with attempted decode | # validate with attempted decode | ||||
c.decode("utf-8", _utf8strict) | c.decode("utf-8", _utf8strict) | ||||
return c | return c | ||||
def toutf8b(s): | def toutf8b(s): | ||||
# type: (bytes) -> bytes | # type: (bytes) -> bytes | ||||
'''convert a local, possibly-binary string into UTF-8b | """convert a local, possibly-binary string into UTF-8b | ||||
This is intended as a generic method to preserve data when working | This is intended as a generic method to preserve data when working | ||||
with schemes like JSON and XML that have no provision for | with schemes like JSON and XML that have no provision for | ||||
arbitrary byte strings. As Mercurial often doesn't know | arbitrary byte strings. As Mercurial often doesn't know | ||||
what encoding data is in, we use so-called UTF-8b. | what encoding data is in, we use so-called UTF-8b. | ||||
If a string is already valid UTF-8 (or ASCII), it passes unmodified. | If a string is already valid UTF-8 (or ASCII), it passes unmodified. | ||||
Otherwise, unsupported bytes are mapped to UTF-16 surrogate range, | Otherwise, unsupported bytes are mapped to UTF-16 surrogate range, | ||||
- non-lossy local strings (aka safelocalstr) get sent as UTF-8 as well | - non-lossy local strings (aka safelocalstr) get sent as UTF-8 as well | ||||
- because we must preserve UTF-8 bytestring in places such as | - because we must preserve UTF-8 bytestring in places such as | ||||
filenames, metadata can't be roundtripped without help | filenames, metadata can't be roundtripped without help | ||||
(Note: "UTF-8b" often refers to decoding a mix of valid UTF-8 and | (Note: "UTF-8b" often refers to decoding a mix of valid UTF-8 and | ||||
arbitrary bytes into an internal Unicode format that can be | arbitrary bytes into an internal Unicode format that can be | ||||
re-encoded back into the original. Here we are exposing the | re-encoded back into the original. Here we are exposing the | ||||
internal surrogate encoding as a UTF-8 string.) | internal surrogate encoding as a UTF-8 string.) | ||||
''' | """ | ||||
if isinstance(s, localstr): | if isinstance(s, localstr): | ||||
# assume that the original UTF-8 sequence would never contain | # assume that the original UTF-8 sequence would never contain | ||||
# invalid characters in U+DCxx range | # invalid characters in U+DCxx range | ||||
return s._utf8 | return s._utf8 | ||||
elif isinstance(s, safelocalstr): | elif isinstance(s, safelocalstr): | ||||
# already verified that s is non-lossy in legacy encoding, which | # already verified that s is non-lossy in legacy encoding, which | ||||
# shouldn't contain characters in U+DCxx range | # shouldn't contain characters in U+DCxx range | ||||
c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict) | c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict) | ||||
pos += 1 | pos += 1 | ||||
r += c | r += c | ||||
return r | return r | ||||
def fromutf8b(s): | def fromutf8b(s): | ||||
# type: (bytes) -> bytes | # type: (bytes) -> bytes | ||||
'''Given a UTF-8b string, return a local, possibly-binary string. | """Given a UTF-8b string, return a local, possibly-binary string. | ||||
If s was produced by toutf8b, return the original binary string. This | If s was produced by toutf8b, return the original binary string. This | ||||
is a round-trip process for strings like filenames, but metadata | is a round-trip process for strings like filenames, but metadata | ||||
that was passed through tolocal will remain in UTF-8. | that was passed through tolocal will remain in UTF-8. | ||||
>>> roundtrip = lambda x: fromutf8b(toutf8b(x)) == x | >>> roundtrip = lambda x: fromutf8b(toutf8b(x)) == x | ||||
>>> m = b"\\xc3\\xa9\\x99abcd" | >>> m = b"\\xc3\\xa9\\x99abcd" | ||||
>>> toutf8b(m) | >>> toutf8b(m) | ||||
'\\xc3\\xa9\\xed\\xb2\\x99abcd' | '\\xc3\\xa9\\xed\\xb2\\x99abcd' | ||||
>>> roundtrip(m) | >>> roundtrip(m) | ||||
True | True | ||||
>>> roundtrip(b"\\xc2\\xc2\\x80") | >>> roundtrip(b"\\xc2\\xc2\\x80") | ||||
True | True | ||||
>>> roundtrip(b"\\xef\\xbf\\xbd") | >>> roundtrip(b"\\xef\\xbf\\xbd") | ||||
True | True | ||||
>>> roundtrip(b"\\xef\\xef\\xbf\\xbd") | >>> roundtrip(b"\\xef\\xef\\xbf\\xbd") | ||||
True | True | ||||
>>> roundtrip(b"\\xf1\\x80\\x80\\x80\\x80") | >>> roundtrip(b"\\xf1\\x80\\x80\\x80\\x80") | ||||
True | True | ||||
''' | """ | ||||
if isasciistr(s): | if isasciistr(s): | ||||
return s | return s | ||||
# fast path - look for uDxxx prefixes in s | # fast path - look for uDxxx prefixes in s | ||||
if b"\xed" not in s: | if b"\xed" not in s: | ||||
return s | return s | ||||
# We could do this with the unicode type but some Python builds | # We could do this with the unicode type but some Python builds |
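
Python 3 ships the same trick as the 'surrogateescape' error handler: undecodable bytes map into the U+DCxx range and round-trip losslessly. toutf8b/fromutf8b do this at the byte-string level, but the mechanism is analogous:

raw = b'caf\xe9'                                     # not valid UTF-8
u = raw.decode('utf-8', 'surrogateescape')           # '\xe9' -> '\udce9'
assert u == 'caf\udce9'
assert u.encode('utf-8', 'surrogateescape') == raw   # lossless round-trip
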
hint=_( | hint=_( | ||||
b'see https://mercurial-scm.org/wiki/MergeStateRecords for ' | b'see https://mercurial-scm.org/wiki/MergeStateRecords for ' | ||||
b'more information' | b'more information' | ||||
), | ), | ||||
) | ) | ||||
class UnknownVersion(Abort): | class UnknownVersion(Abort): | ||||
"""generic exception for aborting from an encounter with an unknown version | """generic exception for aborting from an encounter with an unknown version""" | ||||
""" | |||||
def __init__(self, msg, hint=None, version=None): | def __init__(self, msg, hint=None, version=None): | ||||
self.version = version | self.version = version | ||||
super(UnknownVersion, self).__init__(msg, hint=hint) | super(UnknownVersion, self).__init__(msg, hint=hint) | ||||
class LockError(IOError): | class LockError(IOError): | ||||
def __init__(self, errno, strerror, filename, desc): | def __init__(self, errno, strerror, filename, desc): |
remote, | remote, | ||||
force=False, | force=False, | ||||
revs=None, | revs=None, | ||||
newbranch=False, | newbranch=False, | ||||
bookmarks=(), | bookmarks=(), | ||||
publish=False, | publish=False, | ||||
opargs=None, | opargs=None, | ||||
): | ): | ||||
'''Push outgoing changesets (limited by revs) from a local | """Push outgoing changesets (limited by revs) from a local | ||||
repository to remote. Return an integer: | repository to remote. Return an integer: | ||||
- None means nothing to push | - None means nothing to push | ||||
- 0 means HTTP error | - 0 means HTTP error | ||||
- 1 means we pushed and remote head count is unchanged *or* | - 1 means we pushed and remote head count is unchanged *or* | ||||
we have outgoing changesets but refused to push | we have outgoing changesets but refused to push | ||||
- other values as described by addchangegroup() | - other values as described by addchangegroup() | ||||
''' | """ | ||||
if opargs is None: | if opargs is None: | ||||
opargs = {} | opargs = {} | ||||
pushop = pushoperation( | pushop = pushoperation( | ||||
repo, | repo, | ||||
remote, | remote, | ||||
force, | force, | ||||
revs, | revs, | ||||
newbranch, | newbranch, | ||||
if all(unficl.hasnode(n) for n in pullop.rheads): | if all(unficl.hasnode(n) for n in pullop.rheads): | ||||
break | break | ||||
new_heads = headsofdiff(unficl.heads(), old_heads) | new_heads = headsofdiff(unficl.heads(), old_heads) | ||||
pullop.common = headsofunion(new_heads, pullop.common) | pullop.common = headsofunion(new_heads, pullop.common) | ||||
pullop.rheads = set(pullop.rheads) - pullop.common | pullop.rheads = set(pullop.rheads) - pullop.common | ||||
def add_confirm_callback(repo, pullop): | def add_confirm_callback(repo, pullop): | ||||
""" adds a finalize callback to transaction which can be used to show stats | """adds a finalize callback to transaction which can be used to show stats | ||||
to user and confirm the pull before committing transaction """ | to user and confirm the pull before committing transaction""" | ||||
tr = pullop.trmanager.transaction() | tr = pullop.trmanager.transaction() | ||||
scmutil.registersummarycallback( | scmutil.registersummarycallback( | ||||
repo, tr, txnname=b'pull', as_validator=True | repo, tr, txnname=b'pull', as_validator=True | ||||
) | ) | ||||
reporef = weakref.ref(repo.unfiltered()) | reporef = weakref.ref(repo.unfiltered()) | ||||
def prompt(tr): | def prompt(tr): | ||||
if pullop.remote.capable(b'getbundle'): | if pullop.remote.capable(b'getbundle'): | ||||
# TODO: get bundlecaps from remote | # TODO: get bundlecaps from remote | ||||
cg = pullop.remote.getbundle( | cg = pullop.remote.getbundle( | ||||
b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads | b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads | ||||
) | ) | ||||
elif pullop.heads is None: | elif pullop.heads is None: | ||||
with pullop.remote.commandexecutor() as e: | with pullop.remote.commandexecutor() as e: | ||||
cg = e.callcommand( | cg = e.callcommand( | ||||
b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',} | b'changegroup', | ||||
{ | |||||
b'nodes': pullop.fetch, | |||||
b'source': b'pull', | |||||
}, | |||||
).result() | ).result() | ||||
elif not pullop.remote.capable(b'changegroupsubset'): | elif not pullop.remote.capable(b'changegroupsubset'): | ||||
raise error.Abort( | raise error.Abort( | ||||
_( | _( | ||||
b"partial pull cannot be done because " | b"partial pull cannot be done because " | ||||
b"other repository doesn't support " | b"other repository doesn't support " | ||||
b"changegroupsubset." | b"changegroupsubset." |
) | ) | ||||
# And adjust the phase of all changesets accordingly. | # And adjust the phase of all changesets accordingly. | ||||
for phasenumber, phase in phases.phasenames.items(): | for phasenumber, phase in phases.phasenames.items(): | ||||
if phase == b'secret' or not csetres[b'nodesbyphase'][phase]: | if phase == b'secret' or not csetres[b'nodesbyphase'][phase]: | ||||
continue | continue | ||||
phases.advanceboundary( | phases.advanceboundary( | ||||
repo, tr, phasenumber, csetres[b'nodesbyphase'][phase], | repo, | ||||
tr, | |||||
phasenumber, | |||||
csetres[b'nodesbyphase'][phase], | |||||
) | ) | ||||
# Write bookmark updates. | # Write bookmark updates. | ||||
bookmarks.updatefromremote( | bookmarks.updatefromremote( | ||||
repo.ui, | repo.ui, | ||||
repo, | repo, | ||||
csetres[b'bookmarks'], | csetres[b'bookmarks'], | ||||
remote.url(), | remote.url(), | ||||
return False | return False | ||||
return True | return True | ||||
def _fetchrawstorefiles(repo, remote): | def _fetchrawstorefiles(repo, remote): | ||||
with remote.commandexecutor() as e: | with remote.commandexecutor() as e: | ||||
objs = e.callcommand( | objs = e.callcommand( | ||||
b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog'],} | b'rawstorefiledata', | ||||
{ | |||||
b'files': [b'changelog', b'manifestlog'], | |||||
}, | |||||
).result() | ).result() | ||||
# First object is a summary of files data that follows. | # First object is a summary of files data that follows. | ||||
overall = next(objs) | overall = next(objs) | ||||
progress = repo.ui.makeprogress( | progress = repo.ui.makeprogress( | ||||
_(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes') | _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes') | ||||
) | ) | ||||
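The `ui.makeprogress()` call above follows the codebase's context-manager progress pattern. A minimal sketch of how the loop that follows consumes it, assuming an iterable `chunks` of raw byte strings and a `process()` stand-in (both invented for illustration):

with repo.ui.makeprogress(
    _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
) as progress:
    for chunk in chunks:  # hypothetical iterable of raw byte chunks
        process(chunk)  # stand-in for the real data handling
        progress.increment(step=len(chunk))  # advance the bar by bytes consumed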
for i in pycompat.xrange(0, len(csets), batchsize): | for i in pycompat.xrange(0, len(csets), batchsize): | ||||
batch = [x for x in csets[i : i + batchsize]] | batch = [x for x in csets[i : i + batchsize]] | ||||
if not batch: | if not batch: | ||||
continue | continue | ||||
with remote.commandexecutor() as e: | with remote.commandexecutor() as e: | ||||
args = { | args = { | ||||
b'revisions': [ | b'revisions': [ | ||||
{b'type': b'changesetexplicit', b'nodes': batch,} | { | ||||
b'type': b'changesetexplicit', | |||||
b'nodes': batch, | |||||
} | |||||
], | ], | ||||
b'fields': fields, | b'fields': fields, | ||||
b'haveparents': haveparents, | b'haveparents': haveparents, | ||||
} | } | ||||
if pathfilter: | if pathfilter: | ||||
args[b'pathfilter'] = pathfilter | args[b'pathfilter'] = pathfilter | ||||
for objname, loadermod, loadername in extraloaders: | for objname, loadermod, loadername in extraloaders: | ||||
extraobj = getattr(module, objname, None) | extraobj = getattr(module, objname, None) | ||||
if extraobj is not None: | if extraobj is not None: | ||||
getattr(loadermod, loadername)(ui, name, extraobj) | getattr(loadermod, loadername)(ui, name, extraobj) | ||||
def afterloaded(extension, callback): | def afterloaded(extension, callback): | ||||
'''Run the specified function after a named extension is loaded. | """Run the specified function after a named extension is loaded. | ||||
If the named extension is already loaded, the callback will be called | If the named extension is already loaded, the callback will be called | ||||
immediately. | immediately. | ||||
If the named extension never loads, the callback will be called after | If the named extension never loads, the callback will be called after | ||||
all extensions have been loaded. | all extensions have been loaded. | ||||
The callback receives the named argument ``loaded``, which is a boolean | The callback receives the named argument ``loaded``, which is a boolean | ||||
indicating whether the dependent extension actually loaded. | indicating whether the dependent extension actually loaded. | ||||
''' | """ | ||||
if extension in _extensions: | if extension in _extensions: | ||||
# Report loaded as False if the extension is disabled | # Report loaded as False if the extension is disabled | ||||
loaded = _extensions[extension] is not None | loaded = _extensions[extension] is not None | ||||
callback(loaded=loaded) | callback(loaded=loaded) | ||||
else: | else: | ||||
_aftercallbacks.setdefault(extension, []).append(callback) | _aftercallbacks.setdefault(extension, []).append(callback) | ||||
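A hedged usage sketch for afterloaded(): an extension that wants to cooperate with rebase only if it is present. The `_rebaseloaded` name and its body are illustrative assumptions; extension names are bytes in this codebase.

from mercurial import extensions

def _rebaseloaded(loaded=False):
    # loaded is True if 'rebase' actually loaded, False if it never did
    if loaded:
        pass  # e.g. wrap one of rebase's functions here

def uisetup(ui):
    extensions.afterloaded(b'rebase', _rebaseloaded)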
ui.traceback(force=True) | ui.traceback(force=True) | ||||
ui.warn( | ui.warn( | ||||
_(b'*** failed to populate ui by extension %s: %s\n') | _(b'*** failed to populate ui by extension %s: %s\n') | ||||
% (name, stringutil.forcebytestr(inst)) | % (name, stringutil.forcebytestr(inst)) | ||||
) | ) | ||||
def bind(func, *args): | def bind(func, *args): | ||||
'''Partial function application | """Partial function application | ||||
Returns a new function that is the partial application of args and kwargs | Returns a new function that is the partial application of args and kwargs | ||||
to func. For example, | to func. For example, | ||||
f(1, 2, bar=3) === bind(f, 1)(2, bar=3)''' | f(1, 2, bar=3) === bind(f, 1)(2, bar=3)""" | ||||
assert callable(func) | assert callable(func) | ||||
def closure(*a, **kw): | def closure(*a, **kw): | ||||
return func(*(args + a), **kw) | return func(*(args + a), **kw) | ||||
return closure | return closure | ||||
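The equivalence stated in the docstring is easy to check; a small runnable illustration (`f` and `g` are invented names):

def f(a, b, bar=None):
    return (a, b, bar)

g = bind(f, 1)  # pre-apply the first positional argument
assert g(2, bar=3) == f(1, 2, bar=3) == (1, 2, 3)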
def __enter__(self): | def __enter__(self): | ||||
wrapfunction(self._container, self._funcname, self._wrapper) | wrapfunction(self._container, self._funcname, self._wrapper) | ||||
def __exit__(self, exctype, excvalue, traceback): | def __exit__(self, exctype, excvalue, traceback): | ||||
unwrapfunction(self._container, self._funcname, self._wrapper) | unwrapfunction(self._container, self._funcname, self._wrapper) | ||||
def wrapfunction(container, funcname, wrapper): | def wrapfunction(container, funcname, wrapper): | ||||
'''Wrap the function named funcname in container | """Wrap the function named funcname in container | ||||
Replace the funcname member in the given container with the specified | Replace the funcname member in the given container with the specified | ||||
wrapper. The container is typically a module, class, or instance. | wrapper. The container is typically a module, class, or instance. | ||||
The wrapper will be called like | The wrapper will be called like | ||||
wrapper(orig, *args, **kwargs) | wrapper(orig, *args, **kwargs) | ||||
[...extension stuff...] | [...extension stuff...] | ||||
repo.__class__ = myrepo | repo.__class__ = myrepo | ||||
In general, combining wrapfunction() with subclassing does not | In general, combining wrapfunction() with subclassing does not | ||||
work. Since you cannot control what other extensions are loaded by | work. Since you cannot control what other extensions are loaded by | ||||
your end users, you should play nicely with others by using the | your end users, you should play nicely with others by using the | ||||
subclass trick. | subclass trick. | ||||
''' | """ | ||||
assert callable(wrapper) | assert callable(wrapper) | ||||
origfn = getattr(container, funcname) | origfn = getattr(container, funcname) | ||||
assert callable(origfn) | assert callable(origfn) | ||||
if inspect.ismodule(container): | if inspect.ismodule(container): | ||||
# origfn is not an instance or class method. "partial" can be used. | # origfn is not an instance or class method. "partial" can be used. | ||||
# "partial" won't insert a frame in traceback. | # "partial" won't insert a frame in traceback. | ||||
wrap = functools.partial(wrapper, origfn) | wrap = functools.partial(wrapper, origfn) | ||||
else: | else: | ||||
# "partial" cannot be safely used. Emulate its effect by using "bind". | # "partial" cannot be safely used. Emulate its effect by using "bind". | ||||
# The downside is one more frame in traceback. | # The downside is one more frame in traceback. | ||||
wrap = bind(wrapper, origfn) | wrap = bind(wrapper, origfn) | ||||
_updatewrapper(wrap, origfn, wrapper) | _updatewrapper(wrap, origfn, wrapper) | ||||
setattr(container, funcname, wrap) | setattr(container, funcname, wrap) | ||||
return origfn | return origfn | ||||
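A sketch of the wrapper contract on a plain module, matching the wrapper(orig, *args, **kwargs) convention described above; `mymod`, `greet` and `loudgreet` are invented for illustration:

import types

mymod = types.ModuleType('mymod')
mymod.greet = lambda name: 'hello ' + name

def loudgreet(orig, name):
    # orig is the previous implementation; call it, then post-process
    return orig(name).upper()

origfn = wrapfunction(mymod, 'greet', loudgreet)
assert mymod.greet('x') == 'HELLO X'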
def unwrapfunction(container, funcname, wrapper=None): | def unwrapfunction(container, funcname, wrapper=None): | ||||
'''undo wrapfunction | """undo wrapfunction | ||||
If wrapper is None, undo the last wrap. Otherwise remove the wrapper | If wrapper is None, undo the last wrap. Otherwise remove the wrapper | ||||
from the chain of wrappers. | from the chain of wrappers. | ||||
Return the removed wrapper. | Return the removed wrapper. | ||||
Raise IndexError if wrapper is None and nothing to unwrap; ValueError if | Raise IndexError if wrapper is None and nothing to unwrap; ValueError if | ||||
wrapper is not None but is not found in the wrapper chain. | wrapper is not None but is not found in the wrapper chain. | ||||
''' | """ | ||||
chain = getwrapperchain(container, funcname) | chain = getwrapperchain(container, funcname) | ||||
origfn = chain.pop() | origfn = chain.pop() | ||||
if wrapper is None: | if wrapper is None: | ||||
wrapper = chain[0] | wrapper = chain[0] | ||||
chain.remove(wrapper) | chain.remove(wrapper) | ||||
setattr(container, funcname, origfn) | setattr(container, funcname, origfn) | ||||
for w in reversed(chain): | for w in reversed(chain): | ||||
wrapfunction(container, funcname, w) | wrapfunction(container, funcname, w) | ||||
return wrapper | return wrapper | ||||
def getwrapperchain(container, funcname): | def getwrapperchain(container, funcname): | ||||
'''get a chain of wrappers of a function | """get a chain of wrappers of a function | ||||
Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc] | Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc] | ||||
The wrapper functions are the ones passed to wrapfunction, whose first | The wrapper functions are the ones passed to wrapfunction, whose first | ||||
argument is origfunc. | argument is origfunc. | ||||
''' | """ | ||||
result = [] | result = [] | ||||
fn = getattr(container, funcname) | fn = getattr(container, funcname) | ||||
while fn: | while fn: | ||||
assert callable(fn) | assert callable(fn) | ||||
result.append(getattr(fn, '_unboundwrapper', fn)) | result.append(getattr(fn, '_unboundwrapper', fn)) | ||||
fn = getattr(fn, '_origfunc', None) | fn = getattr(fn, '_origfunc', None) | ||||
return result | return result | ||||
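Continuing the sketch above: wrappers stack and can be removed individually, and getwrapperchain() reports the newest wrapper first with the original function last.

def exclaim(orig, name):
    return orig(name) + '!'

wrapfunction(mymod, 'greet', exclaim)
chain = getwrapperchain(mymod, 'greet')
assert chain[0] is exclaim and chain[-1] is origfn  # newest ... original

unwrapfunction(mymod, 'greet', exclaim)  # remove just that one wrapper
assert mymod.greet('x') == 'HELLO X'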
# If no path was provided for a disabled extension (e.g. "color=!"), | # If no path was provided for a disabled extension (e.g. "color=!"), | ||||
# don't replace the path we already found by the scan above. | # don't replace the path we already found by the scan above. | ||||
if path: | if path: | ||||
exts[name] = path | exts[name] = path | ||||
return exts | return exts | ||||
def _moduledoc(file): | def _moduledoc(file): | ||||
'''return the top-level python documentation for the given file | """return the top-level python documentation for the given file | ||||
Loosely inspired by pydoc.source_synopsis(), but rewritten to | Loosely inspired by pydoc.source_synopsis(), but rewritten to | ||||
handle triple quotes and to return the whole text instead of just | handle triple quotes and to return the whole text instead of just | ||||
the synopsis''' | the synopsis""" | ||||
result = [] | result = [] | ||||
line = file.readline() | line = file.readline() | ||||
while line[:1] == b'#' or not line.strip(): | while line[:1] == b'#' or not line.strip(): | ||||
line = file.readline() | line = file.readline() | ||||
if not line: | if not line: | ||||
break | break | ||||
break | break | ||||
else: | else: | ||||
cmd = aliases[0] | cmd = aliases[0] | ||||
doc = _disabledhelp(path) | doc = _disabledhelp(path) | ||||
return (cmd, name, doc) | return (cmd, name, doc) | ||||
def disabledcmd(ui, cmd, strict=False): | def disabledcmd(ui, cmd, strict=False): | ||||
'''find cmd from disabled extensions without importing. | """find cmd from disabled extensions without importing. | ||||
returns (cmdname, extname, doc)''' | returns (cmdname, extname, doc)""" | ||||
paths = _disabledpaths() | paths = _disabledpaths() | ||||
if not paths: | if not paths: | ||||
raise error.UnknownCommand(cmd) | raise error.UnknownCommand(cmd) | ||||
ext = None | ext = None | ||||
# first, search for an extension with the same name as the command | # first, search for an extension with the same name as the command | ||||
path = paths.pop(cmd, None) | path = paths.pop(cmd, None) |
def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None): | def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None): | ||||
return _filemerge( | return _filemerge( | ||||
False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels | False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels | ||||
) | ) | ||||
def loadinternalmerge(ui, extname, registrarobj): | def loadinternalmerge(ui, extname, registrarobj): | ||||
"""Load internal merge tool from specified registrarobj | """Load internal merge tool from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
fullname = b':' + name | fullname = b':' + name | ||||
internals[fullname] = func | internals[fullname] = func | ||||
internals[b'internal:' + name] = func | internals[b'internal:' + name] = func | ||||
internalsdoc[fullname] = func | internalsdoc[fullname] = func | ||||
capabilities = sorted([k for k, v in func.capabilities.items() if v]) | capabilities = sorted([k for k, v in func.capabilities.items() if v]) | ||||
if capabilities: | if capabilities: |
# x - argument in tree form | # x - argument in tree form | ||||
symbols = filesetlang.symbols | symbols = filesetlang.symbols | ||||
predicate = registrar.filesetpredicate(symbols) | predicate = registrar.filesetpredicate(symbols) | ||||
@predicate(b'modified()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'modified()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def modified(mctx, x): | def modified(mctx, x): | ||||
"""File that is modified according to :hg:`status`. | """File that is modified according to :hg:`status`.""" | ||||
""" | |||||
# i18n: "modified" is a keyword | # i18n: "modified" is a keyword | ||||
getargs(x, 0, 0, _(b"modified takes no arguments")) | getargs(x, 0, 0, _(b"modified takes no arguments")) | ||||
s = set(mctx.status().modified) | s = set(mctx.status().modified) | ||||
return mctx.predicate(s.__contains__, predrepr=b'modified') | return mctx.predicate(s.__contains__, predrepr=b'modified') | ||||
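All of the predicates in this file follow the same registrar pattern; a hedged sketch of a minimal predicate under that machinery (`empty()` is invented here, not a real fileset predicate):

@predicate(b'empty()', weight=_WEIGHT_READ_CONTENTS)
def empty(mctx, x):
    """File that is empty. (Illustrative sketch only.)"""
    getargs(x, 0, 0, _(b"empty takes no arguments"))
    return mctx.fpredicate(
        lambda fctx: fctx.size() == 0, predrepr=b'empty', cache=True
    )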
@predicate(b'added()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'added()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def added(mctx, x): | def added(mctx, x): | ||||
"""File that is added according to :hg:`status`. | """File that is added according to :hg:`status`.""" | ||||
""" | |||||
# i18n: "added" is a keyword | # i18n: "added" is a keyword | ||||
getargs(x, 0, 0, _(b"added takes no arguments")) | getargs(x, 0, 0, _(b"added takes no arguments")) | ||||
s = set(mctx.status().added) | s = set(mctx.status().added) | ||||
return mctx.predicate(s.__contains__, predrepr=b'added') | return mctx.predicate(s.__contains__, predrepr=b'added') | ||||
@predicate(b'removed()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'removed()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def removed(mctx, x): | def removed(mctx, x): | ||||
"""File that is removed according to :hg:`status`. | """File that is removed according to :hg:`status`.""" | ||||
""" | |||||
# i18n: "removed" is a keyword | # i18n: "removed" is a keyword | ||||
getargs(x, 0, 0, _(b"removed takes no arguments")) | getargs(x, 0, 0, _(b"removed takes no arguments")) | ||||
s = set(mctx.status().removed) | s = set(mctx.status().removed) | ||||
return mctx.predicate(s.__contains__, predrepr=b'removed') | return mctx.predicate(s.__contains__, predrepr=b'removed') | ||||
@predicate(b'deleted()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'deleted()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def deleted(mctx, x): | def deleted(mctx, x): | ||||
"""Alias for ``missing()``. | """Alias for ``missing()``.""" | ||||
""" | |||||
# i18n: "deleted" is a keyword | # i18n: "deleted" is a keyword | ||||
getargs(x, 0, 0, _(b"deleted takes no arguments")) | getargs(x, 0, 0, _(b"deleted takes no arguments")) | ||||
s = set(mctx.status().deleted) | s = set(mctx.status().deleted) | ||||
return mctx.predicate(s.__contains__, predrepr=b'deleted') | return mctx.predicate(s.__contains__, predrepr=b'deleted') | ||||
@predicate(b'missing()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'missing()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def missing(mctx, x): | def missing(mctx, x): | ||||
"""File that is missing according to :hg:`status`. | """File that is missing according to :hg:`status`.""" | ||||
""" | |||||
# i18n: "missing" is a keyword | # i18n: "missing" is a keyword | ||||
getargs(x, 0, 0, _(b"missing takes no arguments")) | getargs(x, 0, 0, _(b"missing takes no arguments")) | ||||
s = set(mctx.status().deleted) | s = set(mctx.status().deleted) | ||||
return mctx.predicate(s.__contains__, predrepr=b'deleted') | return mctx.predicate(s.__contains__, predrepr=b'deleted') | ||||
@predicate(b'unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) | @predicate(b'unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) | ||||
def unknown(mctx, x): | def unknown(mctx, x): | ||||
# i18n: "ignored" is a keyword | # i18n: "ignored" is a keyword | ||||
getargs(x, 0, 0, _(b"ignored takes no arguments")) | getargs(x, 0, 0, _(b"ignored takes no arguments")) | ||||
s = set(mctx.status().ignored) | s = set(mctx.status().ignored) | ||||
return mctx.predicate(s.__contains__, predrepr=b'ignored') | return mctx.predicate(s.__contains__, predrepr=b'ignored') | ||||
@predicate(b'clean()', callstatus=True, weight=_WEIGHT_STATUS) | @predicate(b'clean()', callstatus=True, weight=_WEIGHT_STATUS) | ||||
def clean(mctx, x): | def clean(mctx, x): | ||||
"""File that is clean according to :hg:`status`. | """File that is clean according to :hg:`status`.""" | ||||
""" | |||||
# i18n: "clean" is a keyword | # i18n: "clean" is a keyword | ||||
getargs(x, 0, 0, _(b"clean takes no arguments")) | getargs(x, 0, 0, _(b"clean takes no arguments")) | ||||
s = set(mctx.status().clean) | s = set(mctx.status().clean) | ||||
return mctx.predicate(s.__contains__, predrepr=b'clean') | return mctx.predicate(s.__contains__, predrepr=b'clean') | ||||
@predicate(b'tracked()') | @predicate(b'tracked()') | ||||
def tracked(mctx, x): | def tracked(mctx, x): | ||||
"""File that is under Mercurial control.""" | """File that is under Mercurial control.""" | ||||
# i18n: "tracked" is a keyword | # i18n: "tracked" is a keyword | ||||
getargs(x, 0, 0, _(b"tracked takes no arguments")) | getargs(x, 0, 0, _(b"tracked takes no arguments")) | ||||
return mctx.predicate(mctx.ctx.__contains__, predrepr=b'tracked') | return mctx.predicate(mctx.ctx.__contains__, predrepr=b'tracked') | ||||
@predicate(b'binary()', weight=_WEIGHT_READ_CONTENTS) | @predicate(b'binary()', weight=_WEIGHT_READ_CONTENTS) | ||||
def binary(mctx, x): | def binary(mctx, x): | ||||
"""File that appears to be binary (contains NUL bytes). | """File that appears to be binary (contains NUL bytes).""" | ||||
""" | |||||
# i18n: "binary" is a keyword | # i18n: "binary" is a keyword | ||||
getargs(x, 0, 0, _(b"binary takes no arguments")) | getargs(x, 0, 0, _(b"binary takes no arguments")) | ||||
return mctx.fpredicate( | return mctx.fpredicate( | ||||
lambda fctx: fctx.isbinary(), predrepr=b'binary', cache=True | lambda fctx: fctx.isbinary(), predrepr=b'binary', cache=True | ||||
) | ) | ||||
@predicate(b'exec()') | @predicate(b'exec()') | ||||
def exec_(mctx, x): | def exec_(mctx, x): | ||||
"""File that is marked as executable. | """File that is marked as executable.""" | ||||
""" | |||||
# i18n: "exec" is a keyword | # i18n: "exec" is a keyword | ||||
getargs(x, 0, 0, _(b"exec takes no arguments")) | getargs(x, 0, 0, _(b"exec takes no arguments")) | ||||
ctx = mctx.ctx | ctx = mctx.ctx | ||||
return mctx.predicate(lambda f: ctx.flags(f) == b'x', predrepr=b'exec') | return mctx.predicate(lambda f: ctx.flags(f) == b'x', predrepr=b'exec') | ||||
@predicate(b'symlink()') | @predicate(b'symlink()') | ||||
def symlink(mctx, x): | def symlink(mctx, x): | ||||
"""File that is marked as a symlink. | """File that is marked as a symlink.""" | ||||
""" | |||||
# i18n: "symlink" is a keyword | # i18n: "symlink" is a keyword | ||||
getargs(x, 0, 0, _(b"symlink takes no arguments")) | getargs(x, 0, 0, _(b"symlink takes no arguments")) | ||||
ctx = mctx.ctx | ctx = mctx.ctx | ||||
return mctx.predicate(lambda f: ctx.flags(f) == b'l', predrepr=b'symlink') | return mctx.predicate(lambda f: ctx.flags(f) == b'l', predrepr=b'symlink') | ||||
@predicate(b'resolved()', weight=_WEIGHT_STATUS) | @predicate(b'resolved()', weight=_WEIGHT_STATUS) | ||||
def resolved(mctx, x): | def resolved(mctx, x): | ||||
"""File that is marked resolved according to :hg:`resolve -l`. | """File that is marked resolved according to :hg:`resolve -l`.""" | ||||
""" | |||||
# i18n: "resolved" is a keyword | # i18n: "resolved" is a keyword | ||||
getargs(x, 0, 0, _(b"resolved takes no arguments")) | getargs(x, 0, 0, _(b"resolved takes no arguments")) | ||||
if mctx.ctx.rev() is not None: | if mctx.ctx.rev() is not None: | ||||
return mctx.never() | return mctx.never() | ||||
ms = mergestatemod.mergestate.read(mctx.ctx.repo()) | ms = mergestatemod.mergestate.read(mctx.ctx.repo()) | ||||
return mctx.predicate( | return mctx.predicate( | ||||
lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved' | lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved' | ||||
) | ) | ||||
@predicate(b'unresolved()', weight=_WEIGHT_STATUS) | @predicate(b'unresolved()', weight=_WEIGHT_STATUS) | ||||
def unresolved(mctx, x): | def unresolved(mctx, x): | ||||
"""File that is marked unresolved according to :hg:`resolve -l`. | """File that is marked unresolved according to :hg:`resolve -l`.""" | ||||
""" | |||||
# i18n: "unresolved" is a keyword | # i18n: "unresolved" is a keyword | ||||
getargs(x, 0, 0, _(b"unresolved takes no arguments")) | getargs(x, 0, 0, _(b"unresolved takes no arguments")) | ||||
if mctx.ctx.rev() is not None: | if mctx.ctx.rev() is not None: | ||||
return mctx.never() | return mctx.never() | ||||
ms = mergestatemod.mergestate.read(mctx.ctx.repo()) | ms = mergestatemod.mergestate.read(mctx.ctx.repo()) | ||||
return mctx.predicate( | return mctx.predicate( | ||||
lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved' | lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved' | ||||
) | ) | ||||
@predicate(b'hgignore()', weight=_WEIGHT_STATUS) | @predicate(b'hgignore()', weight=_WEIGHT_STATUS) | ||||
def hgignore(mctx, x): | def hgignore(mctx, x): | ||||
"""File that matches the active .hgignore pattern. | """File that matches the active .hgignore pattern.""" | ||||
""" | |||||
# i18n: "hgignore" is a keyword | # i18n: "hgignore" is a keyword | ||||
getargs(x, 0, 0, _(b"hgignore takes no arguments")) | getargs(x, 0, 0, _(b"hgignore takes no arguments")) | ||||
return mctx.ctx.repo().dirstate._ignore | return mctx.ctx.repo().dirstate._ignore | ||||
@predicate(b'portable()', weight=_WEIGHT_CHECK_FILENAME) | @predicate(b'portable()', weight=_WEIGHT_CHECK_FILENAME) | ||||
def portable(mctx, x): | def portable(mctx, x): | ||||
"""File that has a portable name. (This doesn't include filenames with case | """File that has a portable name. (This doesn't include filenames with case | ||||
collisions.) | collisions.) | ||||
""" | """ | ||||
# i18n: "portable" is a keyword | # i18n: "portable" is a keyword | ||||
getargs(x, 0, 0, _(b"portable takes no arguments")) | getargs(x, 0, 0, _(b"portable takes no arguments")) | ||||
return mctx.predicate( | return mctx.predicate( | ||||
lambda f: util.checkwinfilename(f) is None, predrepr=b'portable' | lambda f: util.checkwinfilename(f) is None, predrepr=b'portable' | ||||
) | ) | ||||
@predicate(b'grep(regex)', weight=_WEIGHT_READ_CONTENTS) | @predicate(b'grep(regex)', weight=_WEIGHT_READ_CONTENTS) | ||||
def grep(mctx, x): | def grep(mctx, x): | ||||
"""File contains the given regular expression. | """File contains the given regular expression.""" | ||||
""" | |||||
try: | try: | ||||
# i18n: "grep" is a keyword | # i18n: "grep" is a keyword | ||||
r = re.compile(getstring(x, _(b"grep requires a pattern"))) | r = re.compile(getstring(x, _(b"grep requires a pattern"))) | ||||
except re.error as e: | except re.error as e: | ||||
raise error.ParseError( | raise error.ParseError( | ||||
_(b'invalid match pattern: %s') % stringutil.forcebytestr(e) | _(b'invalid match pattern: %s') % stringutil.forcebytestr(e) | ||||
) | ) | ||||
return mctx.fpredicate( | return mctx.fpredicate( | ||||
return True | return True | ||||
return False | return False | ||||
return mctx.fpredicate(eolp, predrepr=(b'eol(%r)', enc), cache=True) | return mctx.fpredicate(eolp, predrepr=(b'eol(%r)', enc), cache=True) | ||||
@predicate(b'copied()') | @predicate(b'copied()') | ||||
def copied(mctx, x): | def copied(mctx, x): | ||||
"""File that is recorded as being copied. | """File that is recorded as being copied.""" | ||||
""" | |||||
# i18n: "copied" is a keyword | # i18n: "copied" is a keyword | ||||
getargs(x, 0, 0, _(b"copied takes no arguments")) | getargs(x, 0, 0, _(b"copied takes no arguments")) | ||||
def copiedp(fctx): | def copiedp(fctx): | ||||
p = fctx.parents() | p = fctx.parents() | ||||
return p and p[0].path() != fctx.path() | return p and p[0].path() != fctx.path() | ||||
return mctx.fpredicate(copiedp, predrepr=b'copied', cache=True) | return mctx.fpredicate(copiedp, predrepr=b'copied', cache=True) | ||||
raise error.ParseError(reverr) | raise error.ParseError(reverr) | ||||
basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec]) | basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec]) | ||||
mc = mctx.switch(basectx, ctx) | mc = mctx.switch(basectx, ctx) | ||||
return getmatch(mc, x) | return getmatch(mc, x) | ||||
@predicate(b'subrepo([pattern])') | @predicate(b'subrepo([pattern])') | ||||
def subrepo(mctx, x): | def subrepo(mctx, x): | ||||
"""Subrepositories whose paths match the given pattern. | """Subrepositories whose paths match the given pattern.""" | ||||
""" | |||||
# i18n: "subrepo" is a keyword | # i18n: "subrepo" is a keyword | ||||
getargs(x, 0, 1, _(b"subrepo takes at most one argument")) | getargs(x, 0, 1, _(b"subrepo takes at most one argument")) | ||||
ctx = mctx.ctx | ctx = mctx.ctx | ||||
sstate = ctx.substate | sstate = ctx.substate | ||||
if x: | if x: | ||||
pat = getpattern( | pat = getpattern( | ||||
x, | x, | ||||
matchmod.allpatternkinds, | matchmod.allpatternkinds, | ||||
tree = filesetlang.parse(expr) | tree = filesetlang.parse(expr) | ||||
tree = filesetlang.analyze(tree) | tree = filesetlang.analyze(tree) | ||||
tree = filesetlang.optimize(tree) | tree = filesetlang.optimize(tree) | ||||
mctx = matchctx(ctx.p1(), ctx, cwd, badfn=badfn) | mctx = matchctx(ctx.p1(), ctx, cwd, badfn=badfn) | ||||
return getmatch(mctx, tree) | return getmatch(mctx, tree) | ||||
def loadpredicate(ui, extname, registrarobj): | def loadpredicate(ui, extname, registrarobj): | ||||
"""Load fileset predicates from specified registrarobj | """Load fileset predicates from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
symbols[name] = func | symbols[name] = func | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = symbols.values() | i18nfunctions = symbols.values() |
commands, | commands, | ||||
name, | name, | ||||
unknowncmd=False, | unknowncmd=False, | ||||
full=True, | full=True, | ||||
subtopic=None, | subtopic=None, | ||||
fullname=None, | fullname=None, | ||||
**opts | **opts | ||||
): | ): | ||||
''' | """ | ||||
Generate the help for 'name' as unformatted restructured text. If | Generate the help for 'name' as unformatted restructured text. If | ||||
'name' is None, describe the commands available. | 'name' is None, describe the commands available. | ||||
''' | """ | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
def helpcmd(name, subtopic=None): | def helpcmd(name, subtopic=None): | ||||
try: | try: | ||||
aliases, entry = cmdutil.findcmd( | aliases, entry = cmdutil.findcmd( | ||||
name, commands.table, strict=unknowncmd | name, commands.table, strict=unknowncmd | ||||
) | ) |
'''return a repository peer for the specified path''' | '''return a repository peer for the specified path''' | ||||
rui = remoteui(uiorrepo, opts) | rui = remoteui(uiorrepo, opts) | ||||
return _peerorrepo( | return _peerorrepo( | ||||
rui, path, create, intents=intents, createopts=createopts | rui, path, create, intents=intents, createopts=createopts | ||||
).peer() | ).peer() | ||||
def defaultdest(source): | def defaultdest(source): | ||||
'''return default destination of clone if none is given | """return default destination of clone if none is given | ||||
>>> defaultdest(b'foo') | >>> defaultdest(b'foo') | ||||
'foo' | 'foo' | ||||
>>> defaultdest(b'/foo/bar') | >>> defaultdest(b'/foo/bar') | ||||
'bar' | 'bar' | ||||
>>> defaultdest(b'/') | >>> defaultdest(b'/') | ||||
'' | '' | ||||
>>> defaultdest(b'') | >>> defaultdest(b'') | ||||
'' | '' | ||||
>>> defaultdest(b'http://example.org/') | >>> defaultdest(b'http://example.org/') | ||||
'' | '' | ||||
>>> defaultdest(b'http://example.org/foo/') | >>> defaultdest(b'http://example.org/foo/') | ||||
'foo' | 'foo' | ||||
''' | """ | ||||
path = util.url(source).path | path = util.url(source).path | ||||
if not path: | if not path: | ||||
return b'' | return b'' | ||||
return os.path.basename(os.path.normpath(path)) | return os.path.basename(os.path.normpath(path)) | ||||
def sharedreposource(repo): | def sharedreposource(repo): | ||||
"""Returns repository object for source repository of a shared repo. | """Returns repository object for source repository of a shared repo. | ||||
postshare(srcrepo, r, defaultpath=defaultpath) | postshare(srcrepo, r, defaultpath=defaultpath) | ||||
r = repository(ui, dest) | r = repository(ui, dest) | ||||
_postshareupdate(r, update, checkout=checkout) | _postshareupdate(r, update, checkout=checkout) | ||||
return r | return r | ||||
def _prependsourcehgrc(repo): | def _prependsourcehgrc(repo): | ||||
""" copies the source repo config and prepend it in current repo .hg/hgrc | """copies the source repo config and prepend it in current repo .hg/hgrc | ||||
on unshare. This is only done if the share was perfomed using share safe | on unshare. This is only done if the share was perfomed using share safe | ||||
method where we share config of source in shares""" | method where we share config of source in shares""" | ||||
srcvfs = vfsmod.vfs(repo.sharedpath) | srcvfs = vfsmod.vfs(repo.sharedpath) | ||||
dstvfs = vfsmod.vfs(repo.path) | dstvfs = vfsmod.vfs(repo.path) | ||||
if not srcvfs.exists(b'hgrc'): | if not srcvfs.exists(b'hgrc'): | ||||
return | return | ||||
uprev = repo.lookup(test) | uprev = repo.lookup(test) | ||||
break | break | ||||
except error.RepoLookupError: | except error.RepoLookupError: | ||||
continue | continue | ||||
_update(repo, uprev) | _update(repo, uprev) | ||||
def copystore(ui, srcrepo, destpath): | def copystore(ui, srcrepo, destpath): | ||||
'''copy files from the store of srcrepo into destpath | """copy files from the store of srcrepo into destpath | ||||
returns destlock | returns destlock | ||||
''' | """ | ||||
destlock = None | destlock = None | ||||
try: | try: | ||||
hardlink = None | hardlink = None | ||||
topic = _(b'linking') if hardlink else _(b'copying') | topic = _(b'linking') if hardlink else _(b'copying') | ||||
with ui.makeprogress(topic, unit=_(b'files')) as progress: | with ui.makeprogress(topic, unit=_(b'files')) as progress: | ||||
num = 0 | num = 0 | ||||
srcpublishing = srcrepo.publishing() | srcpublishing = srcrepo.publishing() | ||||
srcvfs = vfsmod.vfs(srcrepo.sharedpath) | srcvfs = vfsmod.vfs(srcrepo.sharedpath) | ||||
) | ) | ||||
) | ) | ||||
# TODO this is batchable. | # TODO this is batchable. | ||||
remoterevs = [] | remoterevs = [] | ||||
for r in rev: | for r in rev: | ||||
with srcpeer.commandexecutor() as e: | with srcpeer.commandexecutor() as e: | ||||
remoterevs.append( | remoterevs.append( | ||||
e.callcommand(b'lookup', {b'key': r,}).result() | e.callcommand( | ||||
b'lookup', | |||||
{ | |||||
b'key': r, | |||||
}, | |||||
).result() | |||||
) | ) | ||||
revs = remoterevs | revs = remoterevs | ||||
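The "# TODO this is batchable." comment refers to issuing every callcommand() before resolving any future, so the executor may pipeline the requests; a hedged sketch of the batched form:

with srcpeer.commandexecutor() as e:
    # queue all lookups first so the executor can send them as one batch
    futures = [e.callcommand(b'lookup', {b'key': r}) for r in rev]
    # resolving the futures afterwards triggers the actual round trip(s)
    remoterevs = [f.result() for f in futures]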
# Obtain a lock before checking for or cloning the pooled repo otherwise | # Obtain a lock before checking for or cloning the pooled repo otherwise | ||||
# 2 clients may race creating or populating it. | # 2 clients may race creating or populating it. | ||||
pooldir = os.path.dirname(sharepath) | pooldir = os.path.dirname(sharepath) | ||||
# lock class requires the directory to exist. | # lock class requires the directory to exist. | ||||
try: | try: | ||||
if sharenamemode == b'identity': | if sharenamemode == b'identity': | ||||
# Resolve the name from the initial changeset in the remote | # Resolve the name from the initial changeset in the remote | ||||
# repository. This returns nullid when the remote is empty. It | # repository. This returns nullid when the remote is empty. It | ||||
# raises RepoLookupError if revision 0 is filtered or otherwise | # raises RepoLookupError if revision 0 is filtered or otherwise | ||||
# not available. If we fail to resolve, sharing is not enabled. | # not available. If we fail to resolve, sharing is not enabled. | ||||
try: | try: | ||||
with srcpeer.commandexecutor() as e: | with srcpeer.commandexecutor() as e: | ||||
rootnode = e.callcommand( | rootnode = e.callcommand( | ||||
b'lookup', {b'key': b'0',} | b'lookup', | ||||
{ | |||||
b'key': b'0', | |||||
}, | |||||
).result() | ).result() | ||||
if rootnode != node.nullid: | if rootnode != node.nullid: | ||||
sharepath = os.path.join(sharepool, node.hex(rootnode)) | sharepath = os.path.join(sharepool, node.hex(rootnode)) | ||||
else: | else: | ||||
ui.status( | ui.status( | ||||
_( | _( | ||||
b'(not using pooled storage: ' | b'(not using pooled storage: ' | ||||
) | ) | ||||
) | ) | ||||
# TODO this is batchable. | # TODO this is batchable. | ||||
remoterevs = [] | remoterevs = [] | ||||
for rev in revs: | for rev in revs: | ||||
with srcpeer.commandexecutor() as e: | with srcpeer.commandexecutor() as e: | ||||
remoterevs.append( | remoterevs.append( | ||||
e.callcommand(b'lookup', {b'key': rev,}).result() | e.callcommand( | ||||
b'lookup', | |||||
{ | |||||
b'key': rev, | |||||
}, | |||||
).result() | |||||
) | ) | ||||
revs = remoterevs | revs = remoterevs | ||||
checkout = revs[0] | checkout = revs[0] | ||||
else: | else: | ||||
revs = None | revs = None | ||||
local = destpeer.local() | local = destpeer.local() | ||||
if local: | if local: | ||||
if ui.configbool(b'experimental', b'remotenames'): | if ui.configbool(b'experimental', b'remotenames'): | ||||
logexchange.pullremotenames(destrepo, srcpeer) | logexchange.pullremotenames(destrepo, srcpeer) | ||||
if update: | if update: | ||||
if update is not True: | if update is not True: | ||||
with srcpeer.commandexecutor() as e: | with srcpeer.commandexecutor() as e: | ||||
checkout = e.callcommand( | checkout = e.callcommand( | ||||
b'lookup', {b'key': update,} | b'lookup', | ||||
{ | |||||
b'key': update, | |||||
}, | |||||
).result() | ).result() | ||||
uprev = None | uprev = None | ||||
status = None | status = None | ||||
if checkout is not None: | if checkout is not None: | ||||
# Some extensions (at least hg-git and hg-subversion) have | # Some extensions (at least hg-git and hg-subversion) have | ||||
# a peer.lookup() implementation that returns a name instead | # a peer.lookup() implementation that returns a name instead | ||||
# of a nodeid. We work around it here until we've figured | # of a nodeid. We work around it here until we've figured | ||||
if warndest: | if warndest: | ||||
destutil.statusotherdests(ui, repo) | destutil.statusotherdests(ui, repo) | ||||
return ret | return ret | ||||
def merge( | def merge( | ||||
ctx, force=False, remind=True, labels=None, | ctx, | ||||
force=False, | |||||
remind=True, | |||||
labels=None, | |||||
): | ): | ||||
"""Branch merge with node, resolving changes. Return true if any | """Branch merge with node, resolving changes. Return true if any | ||||
unresolved conflicts.""" | unresolved conflicts.""" | ||||
repo = ctx.repo() | repo = ctx.repo() | ||||
stats = mergemod.merge(ctx, force=force, labels=labels) | stats = mergemod.merge(ctx, force=force, labels=labels) | ||||
_showstats(repo, stats) | _showstats(repo, stats) | ||||
if stats.unresolvedcount: | if stats.unresolvedcount: | ||||
repo.ui.status( | repo.ui.status( |
from . import ( | from . import ( | ||||
hgweb_mod, | hgweb_mod, | ||||
hgwebdir_mod, | hgwebdir_mod, | ||||
server, | server, | ||||
) | ) | ||||
def hgweb(config, name=None, baseui=None): | def hgweb(config, name=None, baseui=None): | ||||
'''create an hgweb wsgi object | """create an hgweb wsgi object | ||||
config can be one of: | config can be one of: | ||||
- repo object (single repo view) | - repo object (single repo view) | ||||
- path to repo (single repo view) | - path to repo (single repo view) | ||||
- path to config file (multi-repo view) | - path to config file (multi-repo view) | ||||
- dict of virtual:real pairs (multi-repo view) | - dict of virtual:real pairs (multi-repo view) | ||||
- list of virtual:real tuples (multi-repo view) | - list of virtual:real tuples (multi-repo view) | ||||
''' | """ | ||||
if isinstance(config, pycompat.unicode): | if isinstance(config, pycompat.unicode): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'Mercurial only supports encoded strings: %r' % config | b'Mercurial only supports encoded strings: %r' % config | ||||
) | ) | ||||
if ( | if ( | ||||
(isinstance(config, bytes) and not os.path.isdir(config)) | (isinstance(config, bytes) and not os.path.isdir(config)) | ||||
or isinstance(config, dict) | or isinstance(config, dict) |
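In practice the accepted config shapes map to very short setup code; a hedged sketch of the single-repo and multi-repo forms (paths invented):

from mercurial.hgweb import hgweb

# single repo view: a path to one repository
application = hgweb(b'/srv/hg/myrepo')

# multi-repo view: virtual path -> real path mapping
application = hgweb({b'projects/myrepo': b'/srv/hg/myrepo'})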
If userlist has a single '*' member, all users are considered members. | If userlist has a single '*' member, all users are considered members. | ||||
Can be overridden by extensions to provide more complex authorization | Can be overridden by extensions to provide more complex authorization | ||||
schemes. | schemes. | ||||
""" | """ | ||||
return userlist == [b'*'] or username in userlist | return userlist == [b'*'] or username in userlist | ||||
def checkauthz(hgweb, req, op): | def checkauthz(hgweb, req, op): | ||||
'''Check permission for operation based on request data (including | """Check permission for operation based on request data (including | ||||
authentication info). Return if op allowed, else raise an ErrorResponse | authentication info). Return if op allowed, else raise an ErrorResponse | ||||
exception.''' | exception.""" | ||||
user = req.remoteuser | user = req.remoteuser | ||||
deny_read = hgweb.configlist(b'web', b'deny_read') | deny_read = hgweb.configlist(b'web', b'deny_read') | ||||
if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)): | if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)): | ||||
raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized') | raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized') | ||||
allow_read = hgweb.configlist(b'web', b'allow_read') | allow_read = hgweb.configlist(b'web', b'allow_read') |
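ismember() as shown above ignores its ui argument in the default implementation, so the wildcard rule can be illustrated directly (usernames invented):

assert ismember(None, b'alice', [b'*'])  # a lone '*' admits everyone
assert ismember(None, b'alice', [b'alice', b'bob'])
assert not ismember(None, b'mallory', [b'alice', b'bob'])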
mapfile, fp = templater.try_open_template(location, path) | mapfile, fp = templater.try_open_template(location, path) | ||||
if mapfile: | if mapfile: | ||||
return style, mapfile, fp | return style, mapfile, fp | ||||
raise RuntimeError(b"No hgweb templates found in %r" % path) | raise RuntimeError(b"No hgweb templates found in %r" % path) | ||||
def makebreadcrumb(url, prefix=b''): | def makebreadcrumb(url, prefix=b''): | ||||
'''Return a 'URL breadcrumb' list | """Return a 'URL breadcrumb' list | ||||
A 'URL breadcrumb' is a list of URL-name pairs, | A 'URL breadcrumb' is a list of URL-name pairs, | ||||
corresponding to each of the path items on a URL. | corresponding to each of the path items on a URL. | ||||
This can be used to create path navigation entries. | This can be used to create path navigation entries. | ||||
''' | """ | ||||
if url.endswith(b'/'): | if url.endswith(b'/'): | ||||
url = url[:-1] | url = url[:-1] | ||||
if prefix: | if prefix: | ||||
url = b'/' + prefix + url | url = b'/' + prefix + url | ||||
relpath = url | relpath = url | ||||
if relpath.startswith(b'/'): | if relpath.startswith(b'/'): | ||||
relpath = relpath[1:] | relpath = relpath[1:] | ||||
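For readers who want the shape of the result, a simplified standalone sketch of the breadcrumb idea; the dict keys mirror what hgweb templates consume, but this is illustrative, not the real implementation:

def breadcrumb_sketch(url):
    parts = url.strip(b'/').split(b'/')
    return [
        {b'name': name, b'url': b'/' + b'/'.join(parts[: i + 1])}
        for i, name in enumerate(parts)
    ]

# breadcrumb_sketch(b'/repos/myrepo/sub') yields entries for
# /repos, /repos/myrepo and /repos/myrepo/sub, in that order.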
b'is a generator?' | b'is a generator?' | ||||
) | ) | ||||
assert self._bodywritefn | assert self._bodywritefn | ||||
return offsettrackingwriter(self._bodywritefn) | return offsettrackingwriter(self._bodywritefn) | ||||
def wsgiapplication(app_maker): | def wsgiapplication(app_maker): | ||||
'''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() | """For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() | ||||
can and should now be used as a WSGI application.''' | can and should now be used as a WSGI application.""" | ||||
application = app_maker() | application = app_maker() | ||||
def run_wsgi(env, respond): | def run_wsgi(env, respond): | ||||
return application(env, respond) | return application(env, respond) | ||||
return run_wsgi | return run_wsgi |
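This shim is what old-style hgweb.cgi scripts rely on; a hedged sketch of such a script (the repository path is invented):

#!/usr/bin/env python3
from mercurial import demandimport

demandimport.enable()

import wsgiref.handlers
from mercurial.hgweb import hgweb, wsgiapplication

def make_web_app():
    return hgweb(b'/srv/hg/myrepo')

wsgiref.handlers.CGIHandler().run(wsgiapplication(make_web_app))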
b'tags': nodetagsdict(repo, node), | b'tags': nodetagsdict(repo, node), | ||||
b'bookmarks': nodebookmarksdict(repo, node), | b'bookmarks': nodebookmarksdict(repo, node), | ||||
b'parent': lambda context, mapping: parents(ctx), | b'parent': lambda context, mapping: parents(ctx), | ||||
b'child': lambda context, mapping: children(ctx), | b'child': lambda context, mapping: children(ctx), | ||||
} | } | ||||
def changelistentry(web, ctx): | def changelistentry(web, ctx): | ||||
'''Obtain a dictionary to be used for entries in a changelist. | """Obtain a dictionary to be used for entries in a changelist. | ||||
This function is called when producing items for the "entries" list passed | This function is called when producing items for the "entries" list passed | ||||
to the "shortlog" and "changelog" templates. | to the "shortlog" and "changelog" templates. | ||||
''' | """ | ||||
repo = web.repo | repo = web.repo | ||||
rev = ctx.rev() | rev = ctx.rev() | ||||
n = scmutil.binnode(ctx) | n = scmutil.binnode(ctx) | ||||
showtags = showtag(repo, b'changelogtag', n) | showtags = showtag(repo, b'changelogtag', n) | ||||
files = listfilediffs(ctx.files(), n, web.maxfiles) | files = listfilediffs(ctx.files(), n, web.maxfiles) | ||||
entry = commonentry(repo, ctx) | entry = commonentry(repo, ctx) | ||||
entry.update( | entry.update( |
from .utils import ( | from .utils import ( | ||||
procutil, | procutil, | ||||
resourceutil, | resourceutil, | ||||
stringutil, | stringutil, | ||||
) | ) | ||||
def pythonhook(ui, repo, htype, hname, funcname, args, throw): | def pythonhook(ui, repo, htype, hname, funcname, args, throw): | ||||
'''call python hook. hook is callable object, looked up as | """call python hook. hook is callable object, looked up as | ||||
name in python module. if callable returns "true", hook | name in python module. if callable returns "true", hook | ||||
fails, else passes. if hook raises exception, treated as | fails, else passes. if hook raises exception, treated as | ||||
hook failure. exception propagates if throw is "true". | hook failure. exception propagates if throw is "true". | ||||
reason for "true" meaning "hook failed" is so that | reason for "true" meaning "hook failed" is so that | ||||
unmodified commands (e.g. mercurial.commands.update) can | unmodified commands (e.g. mercurial.commands.update) can | ||||
be run as hooks without wrappers to convert return values.''' | be run as hooks without wrappers to convert return values.""" | ||||
if callable(funcname): | if callable(funcname): | ||||
obj = funcname | obj = funcname | ||||
funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__) | funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__) | ||||
else: | else: | ||||
d = funcname.rfind(b'.') | d = funcname.rfind(b'.') | ||||
if d == -1: | if d == -1: | ||||
raise error.HookLoadError( | raise error.HookLoadError( |
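The rules in the docstring above (dotted-name lookup, falsy return means success) translate into hooks like the following hedged sketch; `myhooks` and `nocaps` are invented, and the module must be importable by hg:

# myhooks.py, wired up in hgrc as e.g.:
#   [hooks]
#   pretxncommit.nocaps = python:myhooks.nocaps

def nocaps(ui, repo, hooktype, node=None, **kwargs):
    """Refuse commits whose description is entirely upper-case."""
    desc = repo[node].description()
    if desc.isupper():
        ui.warn(b'commit message is all caps\n')
        return True  # truthy: hook failed, transaction is rolled back
    return False  # falsy: hook passed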
permissions.remove(b'pull') | permissions.remove(b'pull') | ||||
if len(permissions) > 1: | if len(permissions) > 1: | ||||
raise error.RepoError( | raise error.RepoError( | ||||
_(b'cannot make request requiring multiple permissions: %s') | _(b'cannot make request requiring multiple permissions: %s') | ||||
% _(b', ').join(sorted(permissions)) | % _(b', ').join(sorted(permissions)) | ||||
) | ) | ||||
permission = {b'push': b'rw', b'pull': b'ro',}[permissions.pop()] | permission = { | ||||
b'push': b'rw', | |||||
b'pull': b'ro', | |||||
}[permissions.pop()] | |||||
handler, resp = sendv2request( | handler, resp = sendv2request( | ||||
self._ui, | self._ui, | ||||
self._opener, | self._opener, | ||||
self._requestbuilder, | self._requestbuilder, | ||||
self._apiurl, | self._apiurl, | ||||
permission, | permission, | ||||
calls, | calls, | ||||
# init | # init | ||||
# Callable receiving (ui, repourl, servicepath, opener, requestbuilder, | # Callable receiving (ui, repourl, servicepath, opener, requestbuilder, | ||||
# apidescriptor) to create a peer. | # apidescriptor) to create a peer. | ||||
# | # | ||||
# priority | # priority | ||||
# Integer priority for the service. If we could choose from multiple | # Integer priority for the service. If we could choose from multiple | ||||
# services, we choose the one with the highest priority. | # services, we choose the one with the highest priority. | ||||
API_PEERS = { | API_PEERS = { | ||||
wireprototypes.HTTP_WIREPROTO_V2: {b'init': httpv2peer, b'priority': 50,}, | wireprototypes.HTTP_WIREPROTO_V2: { | ||||
b'init': httpv2peer, | |||||
b'priority': 50, | |||||
}, | |||||
} | } | ||||
def performhandshake(ui, url, opener, requestbuilder): | def performhandshake(ui, url, opener, requestbuilder): | ||||
# The handshake is a request to the capabilities command. | # The handshake is a request to the capabilities command. | ||||
caps = None | caps = None | ||||
from __future__ import absolute_import, print_function | from __future__ import absolute_import, print_function | ||||
import contextlib | import contextlib | ||||
from .. import node as nodemod | from .. import node as nodemod | ||||
from . import util as interfaceutil | from . import util as interfaceutil | ||||
class idirstate(interfaceutil.Interface): | class idirstate(interfaceutil.Interface): | ||||
def __init__(opener, ui, root, validate, sparsematchfn): | def __init__(opener, ui, root, validate, sparsematchfn): | ||||
'''Create a new dirstate object. | """Create a new dirstate object. | ||||
opener is an open()-like callable that can be used to open the | opener is an open()-like callable that can be used to open the | ||||
dirstate file; root is the root of the directory tracked by | dirstate file; root is the root of the directory tracked by | ||||
the dirstate. | the dirstate. | ||||
''' | """ | ||||
# TODO: all these private methods and attributes should be made | # TODO: all these private methods and attributes should be made | ||||
# public or removed from the interface. | # public or removed from the interface. | ||||
_ignore = interfaceutil.Attribute("""Matcher for ignored files.""") | _ignore = interfaceutil.Attribute("""Matcher for ignored files.""") | ||||
def _ignorefiles(): | def _ignorefiles(): | ||||
"""Return a list of files containing patterns to ignore.""" | """Return a list of files containing patterns to ignore.""" | ||||
def _ignorefileandline(f): | def _ignorefileandline(f): | ||||
"""Given a file `f`, return the ignore file and line that ignores it.""" | """Given a file `f`, return the ignore file and line that ignores it.""" | ||||
_checklink = interfaceutil.Attribute("""Callable for checking symlinks.""") | _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""") | ||||
_checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""") | _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""") | ||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def parentchange(): | def parentchange(): | ||||
'''Context manager for handling dirstate parents. | """Context manager for handling dirstate parents. | ||||
If an exception occurs in the scope of the context manager, | If an exception occurs in the scope of the context manager, | ||||
the incoherent dirstate won't be written when wlock is | the incoherent dirstate won't be written when wlock is | ||||
released. | released. | ||||
''' | """ | ||||
def pendingparentchange(): | def pendingparentchange(): | ||||
'''Returns true if the dirstate is in the middle of a set of changes | """Returns true if the dirstate is in the middle of a set of changes | ||||
that modify the dirstate parent. | that modify the dirstate parent. | ||||
''' | """ | ||||
def hasdir(d): | def hasdir(d): | ||||
pass | pass | ||||
def flagfunc(buildfallback): | def flagfunc(buildfallback): | ||||
pass | pass | ||||
def getcwd(): | def getcwd(): | ||||
'''Return the path from which a canonical path is calculated. | """Return the path from which a canonical path is calculated. | ||||
This path should be used to resolve file patterns or to convert | This path should be used to resolve file patterns or to convert | ||||
canonical paths back to file paths for display. It shouldn't be | canonical paths back to file paths for display. It shouldn't be | ||||
used to get real file paths. Use vfs functions instead. | used to get real file paths. Use vfs functions instead. | ||||
''' | """ | ||||
def pathto(f, cwd=None): | def pathto(f, cwd=None): | ||||
pass | pass | ||||
def __getitem__(key): | def __getitem__(key): | ||||
'''Return the current state of key (a filename) in the dirstate. | """Return the current state of key (a filename) in the dirstate. | ||||
States are: | States are: | ||||
n normal | n normal | ||||
m needs merging | m needs merging | ||||
r marked for removal | r marked for removal | ||||
a marked for addition | a marked for addition | ||||
? not tracked | ? not tracked | ||||
''' | """ | ||||
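A hedged sketch of reading those states through the mapping interface (the repo object and file name are assumed):

ds = repo.dirstate  # assuming an existing localrepository object
state = ds[b'README']  # one of b'n', b'm', b'r', b'a', b'?'
if state == b'a':
    repo.ui.status(b'README is marked for addition\n')
tracked = b'README' in ds  # __contains__ from this interface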
def __contains__(key): | def __contains__(key): | ||||
"""Check if bytestring `key` is known to the dirstate.""" | """Check if bytestring `key` is known to the dirstate.""" | ||||
def __iter__(): | def __iter__(): | ||||
"""Iterate the dirstate's contained filenames as bytestrings.""" | """Iterate the dirstate's contained filenames as bytestrings.""" | ||||
def items(): | def items(): | ||||
See localrepo.setparents() | See localrepo.setparents() | ||||
""" | """ | ||||
def setbranch(branch): | def setbranch(branch): | ||||
pass | pass | ||||
def invalidate(): | def invalidate(): | ||||
'''Causes the next access to reread the dirstate. | """Causes the next access to reread the dirstate. | ||||
This is different from localrepo.invalidatedirstate() because it always | This is different from localrepo.invalidatedirstate() because it always | ||||
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to | ||||
check whether the dirstate has changed before rereading it.''' | check whether the dirstate has changed before rereading it.""" | ||||
def copy(source, dest): | def copy(source, dest): | ||||
"""Mark dest as a copy of source. Unmark dest if source is None.""" | """Mark dest as a copy of source. Unmark dest if source is None.""" | ||||
def copied(file): | def copied(file): | ||||
pass | pass | ||||
def copies(): | def copies(): | ||||
pass | pass | ||||
def normal(f, parentfiledata=None): | def normal(f, parentfiledata=None): | ||||
'''Mark a file normal and clean. | """Mark a file normal and clean. | ||||
parentfiledata: (mode, size, mtime) of the clean file | parentfiledata: (mode, size, mtime) of the clean file | ||||
parentfiledata should be computed from memory (for mode, | parentfiledata should be computed from memory (for mode, | ||||
size), at or as close as possible to the point where we | size), at or as close as possible to the point where we | ||||
determined the file was clean, to limit the risk of the | determined the file was clean, to limit the risk of the | ||||
file having been changed by an external process between the | file having been changed by an external process between the | ||||
moment where the file was determined to be clean and now.''' | moment where the file was determined to be clean and now.""" | ||||
pass | pass | ||||
def normallookup(f): | def normallookup(f): | ||||
'''Mark a file normal, but possibly dirty.''' | '''Mark a file normal, but possibly dirty.''' | ||||
def otherparent(f): | def otherparent(f): | ||||
'''Mark as coming from the other parent, always dirty.''' | '''Mark as coming from the other parent, always dirty.''' | ||||
def add(f): | def add(f): | ||||
'''Mark a file added.''' | '''Mark a file added.''' | ||||
def remove(f): | def remove(f): | ||||
'''Mark a file removed.''' | '''Mark a file removed.''' | ||||
def merge(f): | def merge(f): | ||||
'''Mark a file merged.''' | '''Mark a file merged.''' | ||||
def drop(f): | def drop(f): | ||||
'''Drop a file from the dirstate''' | '''Drop a file from the dirstate''' | ||||
def normalize(path, isknown=False, ignoremissing=False): | def normalize(path, isknown=False, ignoremissing=False): | ||||
''' | """ | ||||
normalize the case of a pathname when on a casefolding filesystem | normalize the case of a pathname when on a casefolding filesystem | ||||
isknown specifies whether the filename came from walking the | isknown specifies whether the filename came from walking the | ||||
disk, to avoid extra filesystem access. | disk, to avoid extra filesystem access. | ||||
If ignoremissing is True, missing paths are returned | If ignoremissing is True, missing paths are returned | ||||
unchanged. Otherwise, we try harder to normalize possibly | unchanged. Otherwise, we try harder to normalize possibly | ||||
existing path components. | existing path components. | ||||
The normalized case is determined based on the following precedence: | The normalized case is determined based on the following precedence: | ||||
- version of name already stored in the dirstate | - version of name already stored in the dirstate | ||||
- version of name stored on disk | - version of name stored on disk | ||||
- version provided via command arguments | - version provided via command arguments | ||||
''' | """ | ||||
def clear(): | def clear(): | ||||
pass | pass | ||||
def rebuild(parent, allfiles, changedfiles=None): | def rebuild(parent, allfiles, changedfiles=None): | ||||
pass | pass | ||||
def identity(): | def identity(): | ||||
'''Return identity of the dirstate itself to detect changes in storage | """Return identity of the dirstate itself to detect changes in storage | ||||
If identity of previous dirstate is equal to this, writing | If identity of previous dirstate is equal to this, writing | ||||
changes based on the former dirstate out can keep consistency. | changes based on the former dirstate out can keep consistency. | ||||
''' | """ | ||||
def write(tr): | def write(tr): | ||||
pass | pass | ||||
def addparentchangecallback(category, callback): | def addparentchangecallback(category, callback): | ||||
"""add a callback to be called when the wd parents are changed | """add a callback to be called when the wd parents are changed | ||||
Callback will be called with the following arguments: | Callback will be called with the following arguments: | ||||
dirstate, (oldp1, oldp2), (newp1, newp2) | dirstate, (oldp1, oldp2), (newp1, newp2) | ||||
Category is a unique identifier to allow overwriting an old callback | Category is a unique identifier to allow overwriting an old callback | ||||
with a newer callback. | with a newer callback. | ||||
""" | """ | ||||
def walk(match, subrepos, unknown, ignored, full=True): | def walk(match, subrepos, unknown, ignored, full=True): | ||||
''' | """ | ||||
Walk recursively through the directory tree, finding all files | Walk recursively through the directory tree, finding all files | ||||
matched by match. | matched by match. | ||||
If full is False, maybe skip some known-clean files. | If full is False, maybe skip some known-clean files. | ||||
Return a dict mapping filename to stat-like object (either | Return a dict mapping filename to stat-like object (either | ||||
mercurial.osutil.stat instance or return value of os.stat()). | mercurial.osutil.stat instance or return value of os.stat()). | ||||
''' | """ | ||||
def status(match, subrepos, ignored, clean, unknown): | def status(match, subrepos, ignored, clean, unknown): | ||||
'''Determine the status of the working copy relative to the | """Determine the status of the working copy relative to the | ||||
dirstate and return a pair of (unsure, status), where status is of type | dirstate and return a pair of (unsure, status), where status is of type | ||||
scmutil.status and: | scmutil.status and: | ||||
unsure: | unsure: | ||||
files that might have been modified since the dirstate was | files that might have been modified since the dirstate was | ||||
written, but need to be read to be sure (size is the same | written, but need to be read to be sure (size is the same | ||||
but mtime differs) | but mtime differs) | ||||
status.modified: | status.modified: | ||||
files that have definitely been modified since the dirstate | files that have definitely been modified since the dirstate | ||||
was written (different size or mode) | was written (different size or mode) | ||||
status.clean: | status.clean: | ||||
files that have definitely not been modified since the | files that have definitely not been modified since the | ||||
dirstate was written | dirstate was written | ||||
''' | """ | ||||
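A hedged sketch of calling status() directly with the signature documented above (most callers go through repo.status() instead; `repo` is assumed):

    from mercurial import match as matchmod

    unsure, st = repo.dirstate.status(
        matchmod.always(), [], ignored=False, clean=False, unknown=True
    )
    for f in unsure:
        print('needs a content check: %s' % f)
    for f in st.modified:
        print('definitely modified: %s' % f)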
def matches(match): | def matches(match): | ||||
''' | """ | ||||
return files in the dirstate (in whatever state) filtered by match | return files in the dirstate (in whatever state) filtered by match | ||||
''' | """ | ||||
def savebackup(tr, backupname): | def savebackup(tr, backupname): | ||||
'''Save current dirstate into backup file''' | '''Save current dirstate into backup file''' | ||||
def restorebackup(tr, backupname): | def restorebackup(tr, backupname): | ||||
'''Restore dirstate by backup file''' | '''Restore dirstate by backup file''' | ||||
def clearbackup(tr, backupname): | def clearbackup(tr, backupname): | ||||
'''Clear backup file''' | '''Clear backup file''' |
def size(rev): | def size(rev): | ||||
"""Obtain the fulltext size of file data. | """Obtain the fulltext size of file data. | ||||
Any metadata is excluded from size measurements. | Any metadata is excluded from size measurements. | ||||
""" | """ | ||||
def revision(node, raw=False): | def revision(node, raw=False): | ||||
""""Obtain fulltext data for a node. | """ "Obtain fulltext data for a node. | ||||
By default, any storage transformations are applied before the data | By default, any storage transformations are applied before the data | ||||
is returned. If ``raw`` is True, non-raw storage transformations | is returned. If ``raw`` is True, non-raw storage transformations | ||||
are not applied. | are not applied. | ||||
The fulltext data may contain a header containing metadata. Most | The fulltext data may contain a header containing metadata. Most | ||||
consumers should use ``read()`` to obtain the actual file data. | consumers should use ``read()`` to obtain the actual file data. | ||||
""" | """ | ||||
def rawdata(node): | def rawdata(node): | ||||
"""Obtain raw data for a node. | """Obtain raw data for a node.""" | ||||
""" | |||||
def read(node): | def read(node): | ||||
"""Resolve file fulltext data. | """Resolve file fulltext data. | ||||
This is similar to ``revision()`` except any metadata in the data | This is similar to ``revision()`` except any metadata in the data | ||||
headers is stripped. | headers is stripped. | ||||
""" | """ | ||||
DEBUG = None | DEBUG = None | ||||
class ConnectionManager(object): | class ConnectionManager(object): | ||||
""" | """ | ||||
The connection manager must be able to: | The connection manager must be able to: | ||||
* keep track of all existing | * keep track of all existing | ||||
""" | """ | ||||
def __init__(self): | def __init__(self): | ||||
self._lock = threading.Lock() | self._lock = threading.Lock() | ||||
self._hostmap = collections.defaultdict(list) # host -> [connection] | self._hostmap = collections.defaultdict(list) # host -> [connection] | ||||
self._connmap = {} # map connections to host | self._connmap = {} # map connections to host | ||||
self._readymap = {} # map connection to ready state | self._readymap = {} # map connection to ready state | ||||
def add(self, host, connection, ready): | def add(self, host, connection, ready): | ||||
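The body of add() falls outside this hunk; a sketch consistent with the three maps initialized in __init__() might look like this (an assumption, not the reviewed code):

    def add(self, host, connection, ready):
        # guard all three maps with the shared lock
        self._lock.acquire()
        try:
            self._hostmap[host].append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready
        finally:
            self._lock.release()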
self._broken_pipe_resp = self.getresponse() | self._broken_pipe_resp = self.getresponse() | ||||
reraise = False | reraise = False | ||||
self.close() | self.close() | ||||
if reraise: | if reraise: | ||||
raise | raise | ||||
def wrapgetresponse(cls): | def wrapgetresponse(cls): | ||||
"""Wraps getresponse in cls with a broken-pipe sane version. | """Wraps getresponse in cls with a broken-pipe sane version.""" | ||||
""" | |||||
def safegetresponse(self): | def safegetresponse(self): | ||||
# In safesend() we might set the _broken_pipe_resp | # In safesend() we might set the _broken_pipe_resp | ||||
# attribute, in which case the socket has already | # attribute, in which case the socket has already | ||||
# been closed and we just need to give them the response | # been closed and we just need to give them the response | ||||
# back. Otherwise, we use the normal response path. | # back. Otherwise, we use the normal response path. | ||||
r = getattr(self, '_broken_pipe_resp', None) | r = getattr(self, '_broken_pipe_resp', None) | ||||
if r is not None: | if r is not None: |
# set of (path, vfs-location) tuples. vfs-location is: | # set of (path, vfs-location) tuples. vfs-location is: | ||||
# - 'plain' for vfs relative paths | # - 'plain' for vfs relative paths | ||||
# - '' for svfs relative paths | # - '' for svfs relative paths | ||||
_cachedfiles = set() | _cachedfiles = set() | ||||
class _basefilecache(scmutil.filecache): | class _basefilecache(scmutil.filecache): | ||||
"""All filecache usage on repo are done for logic that should be unfiltered | """All filecache usage on repo are done for logic that should be unfiltered""" | ||||
""" | |||||
def __get__(self, repo, type=None): | def __get__(self, repo, type=None): | ||||
if repo is None: | if repo is None: | ||||
return self | return self | ||||
# proxy to unfiltered __dict__ since filtered repo has no entry | # proxy to unfiltered __dict__ since filtered repo has no entry | ||||
unfi = repo.unfiltered() | unfi = repo.unfiltered() | ||||
try: | try: | ||||
return unfi.__dict__[self.sname] | return unfi.__dict__[self.sname] | ||||
def commandexecutor(self): | def commandexecutor(self): | ||||
return localcommandexecutor(self) | return localcommandexecutor(self) | ||||
# End of peer interface. | # End of peer interface. | ||||
@interfaceutil.implementer(repository.ipeerlegacycommands) | @interfaceutil.implementer(repository.ipeerlegacycommands) | ||||
class locallegacypeer(localpeer): | class locallegacypeer(localpeer): | ||||
'''peer extension which implements legacy methods too; used for tests with | """peer extension which implements legacy methods too; used for tests with | ||||
restricted capabilities''' | restricted capabilities""" | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
super(locallegacypeer, self).__init__(repo, caps=legacycaps) | super(locallegacypeer, self).__init__(repo, caps=legacycaps) | ||||
# Begin of baselegacywirecommands interface. | # Begin of baselegacywirecommands interface. | ||||
def between(self, pairs): | def between(self, pairs): | ||||
return self._repo.between(pairs) | return self._repo.between(pairs) | ||||
# | # | ||||
# The function receives a set of requirement strings that the repository | # The function receives a set of requirement strings that the repository | ||||
# is capable of opening. Functions will typically add elements to the | # is capable of opening. Functions will typically add elements to the | ||||
# set to reflect that the extension knows how to handle those requirements. | # set to reflect that the extension knows how to handle those requirements. | ||||
featuresetupfuncs = set() | featuresetupfuncs = set() | ||||
def _getsharedvfs(hgvfs, requirements): | def _getsharedvfs(hgvfs, requirements): | ||||
""" returns the vfs object pointing to root of shared source | """returns the vfs object pointing to root of shared source | ||||
repo for a shared repository | repo for a shared repository | ||||
hgvfs is vfs pointing at .hg/ of current repo (shared one) | hgvfs is vfs pointing at .hg/ of current repo (shared one) | ||||
requirements is a set of requirements of current repo (shared one) | requirements is a set of requirements of current repo (shared one) | ||||
""" | """ | ||||
# The ``shared`` or ``relshared`` requirements indicate the | # The ``shared`` or ``relshared`` requirements indicate the | ||||
# store lives in the path contained in the ``.hg/sharedpath`` file. | # store lives in the path contained in the ``.hg/sharedpath`` file. | ||||
# This is an absolute path for ``shared`` and relative to | # This is an absolute path for ``shared`` and relative to | ||||
# ``.hg/`` for ``relshared``. | # ``.hg/`` for ``relshared``. | ||||
sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') | sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n') | ||||
if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements: | if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements: | ||||
sharedpath = hgvfs.join(sharedpath) | sharedpath = hgvfs.join(sharedpath) | ||||
sharedvfs = vfsmod.vfs(sharedpath, realpath=True) | sharedvfs = vfsmod.vfs(sharedpath, realpath=True) | ||||
if not sharedvfs.exists(): | if not sharedvfs.exists(): | ||||
raise error.RepoError( | raise error.RepoError( | ||||
_(b'.hg/sharedpath points to nonexistent directory %s') | _(b'.hg/sharedpath points to nonexistent directory %s') | ||||
% sharedvfs.base | % sharedvfs.base | ||||
) | ) | ||||
return sharedvfs | return sharedvfs | ||||
def _readrequires(vfs, allowmissing): | def _readrequires(vfs, allowmissing): | ||||
""" reads the require file present at root of this vfs | """reads the require file present at root of this vfs | ||||
and return a set of requirements | and return a set of requirements | ||||
If allowmissing is True, we suppress ENOENT if raised""" | If allowmissing is True, we suppress ENOENT if raised""" | ||||
# requires file contains a newline-delimited list of | # requires file contains a newline-delimited list of | ||||
# features/capabilities the opener (us) must have in order to use | # features/capabilities the opener (us) must have in order to use | ||||
# the repository. This file was introduced in Mercurial 0.9.2, | # the repository. This file was introduced in Mercurial 0.9.2, | ||||
# which means very old repositories may not have one. We assume | # which means very old repositories may not have one. We assume | ||||
# a missing file translates to no requirements. | # a missing file translates to no requirements. | ||||
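The rest of _readrequires() is outside this hunk; a minimal sketch matching the comment above (suppressing ENOENT only when allowmissing is True) could be:

    import errno

    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements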
# no need to pay the cost of repoview.changelog | # no need to pay the cost of repoview.changelog | ||||
unfi = self.unfiltered() | unfi = self.unfiltered() | ||||
return len(unfi.changelog) | return len(unfi.changelog) | ||||
def __iter__(self): | def __iter__(self): | ||||
return iter(self.changelog) | return iter(self.changelog) | ||||
def revs(self, expr, *args): | def revs(self, expr, *args): | ||||
'''Find revisions matching a revset. | """Find revisions matching a revset. | ||||
The revset is specified as a string ``expr`` that may contain | The revset is specified as a string ``expr`` that may contain | ||||
%-formatting to escape certain types. See ``revsetlang.formatspec``. | %-formatting to escape certain types. See ``revsetlang.formatspec``. | ||||
Revset aliases from the configuration are not expanded. To expand | Revset aliases from the configuration are not expanded. To expand | ||||
user aliases, consider calling ``scmutil.revrange()`` or | user aliases, consider calling ``scmutil.revrange()`` or | ||||
``repo.anyrevs([expr], user=True)``. | ``repo.anyrevs([expr], user=True)``. | ||||
Returns a smartset.abstractsmartset, which is a list-like interface | Returns a smartset.abstractsmartset, which is a list-like interface | ||||
that contains integer revisions. | that contains integer revisions. | ||||
''' | """ | ||||
tree = revsetlang.spectree(expr, *args) | tree = revsetlang.spectree(expr, *args) | ||||
return revset.makematcher(tree)(self) | return revset.makematcher(tree)(self) | ||||
def set(self, expr, *args): | def set(self, expr, *args): | ||||
'''Find revisions matching a revset and emit changectx instances. | """Find revisions matching a revset and emit changectx instances. | ||||
This is a convenience wrapper around ``revs()`` that iterates the | This is a convenience wrapper around ``revs()`` that iterates the | ||||
result and is a generator of changectx instances. | result and is a generator of changectx instances. | ||||
Revset aliases from the configuration are not expanded. To expand | Revset aliases from the configuration are not expanded. To expand | ||||
user aliases, consider calling ``scmutil.revrange()``. | user aliases, consider calling ``scmutil.revrange()``. | ||||
''' | """ | ||||
for r in self.revs(expr, *args): | for r in self.revs(expr, *args): | ||||
yield self[r] | yield self[r] | ||||
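A hedged usage sketch for revs() and set(); the %-placeholders are escaped per revsetlang.formatspec as the docstrings note, and the revision numbers are illustrative:

    for rev in repo.revs(b'ancestors(%d) and not public()', 42):
        print(rev)

    for ctx in repo.set(b'heads(%ld)', [1, 2, 3]):
        print(ctx.hex())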
def anyrevs(self, specs, user=False, localalias=None): | def anyrevs(self, specs, user=False, localalias=None): | ||||
'''Find revisions matching one of the given revsets. | """Find revisions matching one of the given revsets. | ||||
Revset aliases from the configuration are not expanded by default. To | Revset aliases from the configuration are not expanded by default. To | ||||
expand user aliases, specify ``user=True``. To provide some local | expand user aliases, specify ``user=True``. To provide some local | ||||
definitions overriding user aliases, set ``localalias`` to | definitions overriding user aliases, set ``localalias`` to | ||||
``{name: definitionstring}``. | ``{name: definitionstring}``. | ||||
''' | """ | ||||
if specs == [b'null']: | if specs == [b'null']: | ||||
return revset.baseset([nullrev]) | return revset.baseset([nullrev]) | ||||
if specs == [b'.']: | if specs == [b'.']: | ||||
quick_data = self._quick_access_changeid.get(b'.') | quick_data = self._quick_access_changeid.get(b'.') | ||||
if quick_data is not None: | if quick_data is not None: | ||||
return revset.baseset([quick_data[0]]) | return revset.baseset([quick_data[0]]) | ||||
if user: | if user: | ||||
m = revset.matchany( | m = revset.matchany( | ||||
This is a convenience method to aid invoking hooks. Extensions likely | This is a convenience method to aid invoking hooks. Extensions likely | ||||
won't call this unless they have registered a custom hook or are | won't call this unless they have registered a custom hook or are | ||||
replacing code that is expected to call a hook. | replacing code that is expected to call a hook. | ||||
""" | """ | ||||
return hook.hook(self.ui, self, name, throw, **args) | return hook.hook(self.ui, self, name, throw, **args) | ||||
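A hedged sketch of firing a custom hook; the hook name and keyword here are illustrative (keyword arguments surface in the hook's environment):

    repo.hook(b'example-hook', throw=False, source=b'example')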
@filteredpropertycache | @filteredpropertycache | ||||
def _tagscache(self): | def _tagscache(self): | ||||
'''Returns a tagscache object that contains various tag-related | """Returns a tagscache object that contains various tag-related | ||||
caches.''' | caches.""" | ||||
# This simplifies its cache management by having one decorated | # This simplifies its cache management by having one decorated | ||||
# function (this one) and the rest simply fetch things from it. | # function (this one) and the rest simply fetch things from it. | ||||
class tagscache(object): | class tagscache(object): | ||||
def __init__(self): | def __init__(self): | ||||
# These two define the set of tags for this repository. tags | # These two define the set of tags for this repository. tags | ||||
# maps tag name to node; tagtypes maps tag name to 'global' or | # maps tag name to node; tagtypes maps tag name to 'global' or | ||||
# 'local'. (Global tags are defined by .hgtags across all | # 'local'. (Global tags are defined by .hgtags across all | ||||
# ignore tags to unknown nodes | # ignore tags to unknown nodes | ||||
rev(v) | rev(v) | ||||
t[k] = v | t[k] = v | ||||
except (error.LookupError, ValueError): | except (error.LookupError, ValueError): | ||||
pass | pass | ||||
return t | return t | ||||
def _findtags(self): | def _findtags(self): | ||||
'''Do the hard work of finding tags. Return a pair of dicts | """Do the hard work of finding tags. Return a pair of dicts | ||||
(tags, tagtypes) where tags maps tag name to node, and tagtypes | (tags, tagtypes) where tags maps tag name to node, and tagtypes | ||||
maps tag name to a string like \'global\' or \'local\'. | maps tag name to a string like \'global\' or \'local\'. | ||||
Subclasses or extensions are free to add their own tags, but | Subclasses or extensions are free to add their own tags, but | ||||
should be aware that the returned dicts will be retained for the | should be aware that the returned dicts will be retained for the | ||||
duration of the localrepo object.''' | duration of the localrepo object.""" | ||||
# XXX what tagtype should subclasses/extensions use? Currently | # XXX what tagtype should subclasses/extensions use? Currently | ||||
# mq and bookmarks add tags, but do not set the tagtype at all. | # mq and bookmarks add tags, but do not set the tagtype at all. | ||||
# Should each extension invent its own tag type? Should there | # Should each extension invent its own tag type? Should there | ||||
# be one tagtype for all such "virtual" tags? Or is the status | # be one tagtype for all such "virtual" tags? Or is the status | ||||
# quo fine? | # quo fine? | ||||
# map tag name to (node, hist) | # map tag name to (node, hist) | ||||
tags[b'tip'] = self.changelog.tip() | tags[b'tip'] = self.changelog.tip() | ||||
tagtypes = { | tagtypes = { | ||||
encoding.tolocal(name): value | encoding.tolocal(name): value | ||||
for (name, value) in pycompat.iteritems(tagtypes) | for (name, value) in pycompat.iteritems(tagtypes) | ||||
} | } | ||||
return (tags, tagtypes) | return (tags, tagtypes) | ||||
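Since _findtags() explicitly invites subclassing, an extension could add a virtual tag via the usual reposetup class-swap pattern. A hedged sketch; the tag name and tagtype are illustrative:

    def reposetup(ui, repo):
        class virtualtagsrepo(repo.__class__):
            def _findtags(self):
                tags, tagtypes = super(virtualtagsrepo, self)._findtags()
                # advertise the tip as a local 'virtual' tag
                tags[b'last-commit'] = self.changelog.tip()
                tagtypes[b'last-commit'] = b'local'
                return tags, tagtypes

        repo.__class__ = virtualtagsrepo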
def tagtype(self, tagname): | def tagtype(self, tagname): | ||||
''' | """ | ||||
return the type of the given tag. result can be: | return the type of the given tag. result can be: | ||||
'local' : a local tag | 'local' : a local tag | ||||
'global' : a global tag | 'global' : a global tag | ||||
None : tag does not exist | None : tag does not exist | ||||
''' | """ | ||||
return self._tagscache.tagtypes.get(tagname) | return self._tagscache.tagtypes.get(tagname) | ||||
def tagslist(self): | def tagslist(self): | ||||
'''return a list of tags ordered by revision''' | '''return a list of tags ordered by revision''' | ||||
if not self._tagscache.tagslist: | if not self._tagscache.tagslist: | ||||
l = [] | l = [] | ||||
for t, n in pycompat.iteritems(self.tags()): | for t, n in pycompat.iteritems(self.tags()): | ||||
self._tagscache.nodetagscache = nodetagscache | self._tagscache.nodetagscache = nodetagscache | ||||
return self._tagscache.nodetagscache.get(node, []) | return self._tagscache.nodetagscache.get(node, []) | ||||
def nodebookmarks(self, node): | def nodebookmarks(self, node): | ||||
"""return the list of bookmarks pointing to the specified node""" | """return the list of bookmarks pointing to the specified node""" | ||||
return self._bookmarks.names(node) | return self._bookmarks.names(node) | ||||
def branchmap(self): | def branchmap(self): | ||||
'''returns a dictionary {branch: [branchheads]} with branchheads | """returns a dictionary {branch: [branchheads]} with branchheads | ||||
ordered by increasing revision number''' | ordered by increasing revision number""" | ||||
return self._branchcaches[self] | return self._branchcaches[self] | ||||
@unfilteredmethod | @unfilteredmethod | ||||
def revbranchcache(self): | def revbranchcache(self): | ||||
if not self._revbranchcache: | if not self._revbranchcache: | ||||
self._revbranchcache = branchmap.revbranchcache(self.unfiltered()) | self._revbranchcache = branchmap.revbranchcache(self.unfiltered()) | ||||
return self._revbranchcache | return self._revbranchcache | ||||
def branchtip(self, branch, ignoremissing=False): | def branchtip(self, branch, ignoremissing=False): | ||||
'''return the tip node for a given branch | """return the tip node for a given branch | ||||
If ignoremissing is True, then this method will not raise an error. | If ignoremissing is True, then this method will not raise an error. | ||||
This is helpful for callers that only expect None for a missing branch | This is helpful for callers that only expect None for a missing branch | ||||
(e.g. namespace). | (e.g. namespace). | ||||
''' | """ | ||||
try: | try: | ||||
return self.branchmap().branchtip(branch) | return self.branchmap().branchtip(branch) | ||||
except KeyError: | except KeyError: | ||||
if not ignoremissing: | if not ignoremissing: | ||||
raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) | raise error.RepoLookupError(_(b"unknown branch '%s'") % branch) | ||||
else: | else: | ||||
pass | pass | ||||
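A hedged sketch of the ignoremissing behaviour described above (`repo` assumed):

    node = repo.branchtip(b'default', ignoremissing=True)
    if node is None:
        print('no such branch')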
return self.vfs.reljoin(self.root, f, *insidef) | return self.vfs.reljoin(self.root, f, *insidef) | ||||
def setparents(self, p1, p2=nullid): | def setparents(self, p1, p2=nullid): | ||||
self[None].setparents(p1, p2) | self[None].setparents(p1, p2) | ||||
self._quick_access_changeid_invalidate() | self._quick_access_changeid_invalidate() | ||||
def filectx(self, path, changeid=None, fileid=None, changectx=None): | def filectx(self, path, changeid=None, fileid=None, changectx=None): | ||||
"""changeid must be a changeset revision, if specified. | """changeid must be a changeset revision, if specified. | ||||
fileid can be a file revision or node.""" | fileid can be a file revision or node.""" | ||||
return context.filectx( | return context.filectx( | ||||
self, path, changeid, fileid, changectx=changectx | self, path, changeid, fileid, changectx=changectx | ||||
) | ) | ||||
def getcwd(self): | def getcwd(self): | ||||
return self.dirstate.getcwd() | return self.dirstate.getcwd() | ||||
def pathto(self, f, cwd=None): | def pathto(self, f, cwd=None): | ||||
tr.hookargs[b'txnname'] = desc | tr.hookargs[b'txnname'] = desc | ||||
tr.hookargs[b'changes'] = tr.changes | tr.hookargs[b'changes'] = tr.changes | ||||
# note: writing the fncache only during finalize means that the file is | # note: writing the fncache only during finalize means that the file is | ||||
# outdated when running hooks. As fncache is used for streaming clone, | # outdated when running hooks. As fncache is used for streaming clone, | ||||
# this is not expected to break anything that happens during the hooks. | # this is not expected to break anything that happens during the hooks. | ||||
tr.addfinalize(b'flush-fncache', self.store.write) | tr.addfinalize(b'flush-fncache', self.store.write) | ||||
def txnclosehook(tr2): | def txnclosehook(tr2): | ||||
"""To be run if transaction is successful, will schedule a hook run | """To be run if transaction is successful, will schedule a hook run""" | ||||
""" | |||||
# Don't reference tr2 in hook() so we don't hold a reference. | # Don't reference tr2 in hook() so we don't hold a reference. | ||||
# This reduces memory consumption when there are multiple | # This reduces memory consumption when there are multiple | ||||
# transactions per lock. This can likely go away if issue5045 | # transactions per lock. This can likely go away if issue5045 | ||||
# fixes the function accumulation. | # fixes the function accumulation. | ||||
hookargs = tr2.hookargs | hookargs = tr2.hookargs | ||||
def hookfunc(unused_success): | def hookfunc(unused_success): | ||||
repo = reporef() | repo = reporef() | ||||
tr.addfinalize(b'txnclose-hook', txnclosehook) | tr.addfinalize(b'txnclose-hook', txnclosehook) | ||||
# Include a leading "-" to make it happen before the transaction summary | # Include a leading "-" to make it happen before the transaction summary | ||||
# reports registered via scmutil.registersummarycallback() whose names | # reports registered via scmutil.registersummarycallback() whose names | ||||
# are 00-txnreport etc. That way, the caches will be warm when the | # are 00-txnreport etc. That way, the caches will be warm when the | ||||
# callbacks run. | # callbacks run. | ||||
tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) | tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr)) | ||||
def txnaborthook(tr2): | def txnaborthook(tr2): | ||||
"""To be run if transaction is aborted | """To be run if transaction is aborted""" | ||||
""" | |||||
reporef().hook( | reporef().hook( | ||||
b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) | b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs) | ||||
) | ) | ||||
tr.addabort(b'txnabort-hook', txnaborthook) | tr.addabort(b'txnabort-hook', txnaborthook) | ||||
# avoid eager cache invalidation. in-memory data should be identical | # avoid eager cache invalidation. in-memory data should be identical | ||||
# to stored data if transaction has no error. | # to stored data if transaction has no error. | ||||
tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) | tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats) | ||||
self._sparsesignaturecache.clear() | self._sparsesignaturecache.clear() | ||||
def invalidatevolatilesets(self): | def invalidatevolatilesets(self): | ||||
self.filteredrevcache.clear() | self.filteredrevcache.clear() | ||||
obsolete.clearobscaches(self) | obsolete.clearobscaches(self) | ||||
self._quick_access_changeid_invalidate() | self._quick_access_changeid_invalidate() | ||||
def invalidatedirstate(self): | def invalidatedirstate(self): | ||||
'''Invalidates the dirstate, causing the next call to dirstate | """Invalidates the dirstate, causing the next call to dirstate | ||||
to check if it was modified since the last time it was read, | to check if it was modified since the last time it was read, | ||||
rereading it if it has. | rereading it if it has. | ||||
This is different from dirstate.invalidate() in that it doesn't always | This is different from dirstate.invalidate() in that it doesn't always | ||||
reread the dirstate. Use dirstate.invalidate() if you want to | reread the dirstate. Use dirstate.invalidate() if you want to | ||||
explicitly read the dirstate again (i.e. restoring it to a previous | explicitly read the dirstate again (i.e. restoring it to a previous | ||||
known good state).''' | known good state).""" | ||||
if hasunfilteredcache(self, 'dirstate'): | if hasunfilteredcache(self, 'dirstate'): | ||||
for k in self.dirstate._filecache: | for k in self.dirstate._filecache: | ||||
try: | try: | ||||
delattr(self.dirstate, k) | delattr(self.dirstate, k) | ||||
except AttributeError: | except AttributeError: | ||||
pass | pass | ||||
delattr(self.unfiltered(), 'dirstate') | delattr(self.unfiltered(), 'dirstate') | ||||
def invalidate(self, clearfilecache=False): | def invalidate(self, clearfilecache=False): | ||||
'''Invalidates both store and non-store parts other than dirstate | """Invalidates both store and non-store parts other than dirstate | ||||
If a transaction is running, invalidation of store is omitted, | If a transaction is running, invalidation of store is omitted, | ||||
because discarding in-memory changes might cause inconsistency | because discarding in-memory changes might cause inconsistency | ||||
(e.g. incomplete fncache causes unintentional failure, but | (e.g. incomplete fncache causes unintentional failure, but | ||||
redundant one doesn't). | redundant one doesn't). | ||||
''' | """ | ||||
unfiltered = self.unfiltered() # all file caches are stored unfiltered | unfiltered = self.unfiltered() # all file caches are stored unfiltered | ||||
for k in list(self._filecache.keys()): | for k in list(self._filecache.keys()): | ||||
# dirstate is invalidated separately in invalidatedirstate() | # dirstate is invalidated separately in invalidatedirstate() | ||||
if k == b'dirstate': | if k == b'dirstate': | ||||
continue | continue | ||||
if ( | if ( | ||||
k == b'changelog' | k == b'changelog' | ||||
and self.currenttransaction() | and self.currenttransaction() | ||||
self.invalidatecaches() | self.invalidatecaches() | ||||
if not self.currenttransaction(): | if not self.currenttransaction(): | ||||
# TODO: Changing contents of store outside transaction | # TODO: Changing contents of store outside transaction | ||||
# causes inconsistency. We should make in-memory store | # causes inconsistency. We should make in-memory store | ||||
# changes detectable, and abort if changed. | # changes detectable, and abort if changed. | ||||
self.store.invalidatecaches() | self.store.invalidatecaches() | ||||
def invalidateall(self): | def invalidateall(self): | ||||
'''Fully invalidates both store and non-store parts, causing the | """Fully invalidates both store and non-store parts, causing the | ||||
subsequent operation to reread any outside changes.''' | subsequent operation to reread any outside changes.""" | ||||
# extension should hook this to invalidate its caches | # extension should hook this to invalidate its caches | ||||
self.invalidate() | self.invalidate() | ||||
self.invalidatedirstate() | self.invalidatedirstate() | ||||
@unfilteredmethod | @unfilteredmethod | ||||
def _refreshfilecachestats(self, tr): | def _refreshfilecachestats(self, tr): | ||||
"""Reload stats of cached files so that they are flagged as valid""" | """Reload stats of cached files so that they are flagged as valid""" | ||||
for k, ce in self._filecache.items(): | for k, ce in self._filecache.items(): | ||||
k = pycompat.sysstr(k) | k = pycompat.sysstr(k) | ||||
if k == 'dirstate' or k not in self.__dict__: | if k == 'dirstate' or k not in self.__dict__: | ||||
continue | continue | ||||
ce.refresh() | ce.refresh() | ||||
def _lock( | def _lock( | ||||
self, vfs, lockname, wait, releasefn, acquirefn, desc, | self, | ||||
vfs, | |||||
lockname, | |||||
wait, | |||||
releasefn, | |||||
acquirefn, | |||||
desc, | |||||
): | ): | ||||
timeout = 0 | timeout = 0 | ||||
warntimeout = 0 | warntimeout = 0 | ||||
if wait: | if wait: | ||||
timeout = self.ui.configint(b"ui", b"timeout") | timeout = self.ui.configint(b"ui", b"timeout") | ||||
warntimeout = self.ui.configint(b"ui", b"timeout.warn") | warntimeout = self.ui.configint(b"ui", b"timeout.warn") | ||||
# internal config: ui.signal-safe-lock | # internal config: ui.signal-safe-lock | ||||
signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock') | signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock') | ||||
l = ref and ref() | l = ref and ref() | ||||
if l and l.held: | if l and l.held: | ||||
l.postrelease.append(callback) | l.postrelease.append(callback) | ||||
break | break | ||||
else: # no lock has been found. | else: # no lock has been found. | ||||
callback(True) | callback(True) | ||||
def lock(self, wait=True): | def lock(self, wait=True): | ||||
'''Lock the repository store (.hg/store) and return a weak reference | """Lock the repository store (.hg/store) and return a weak reference | ||||
to the lock. Use this before modifying the store (e.g. committing or | to the lock. Use this before modifying the store (e.g. committing or | ||||
stripping). If you are opening a transaction, get a lock as well. | stripping). If you are opening a transaction, get a lock as well. | ||||
If both 'lock' and 'wlock' must be acquired, ensure you always acquire | If both 'lock' and 'wlock' must be acquired, ensure you always acquire | ||||
'wlock' first to avoid a dead-lock hazard.''' | 'wlock' first to avoid a dead-lock hazard.""" | ||||
l = self._currentlock(self._lockref) | l = self._currentlock(self._lockref) | ||||
if l is not None: | if l is not None: | ||||
l.lock() | l.lock() | ||||
return l | return l | ||||
l = self._lock( | l = self._lock( | ||||
vfs=self.svfs, | vfs=self.svfs, | ||||
lockname=b"lock", | lockname=b"lock", | ||||
wait=wait, | wait=wait, | ||||
releasefn=None, | releasefn=None, | ||||
acquirefn=self.invalidate, | acquirefn=self.invalidate, | ||||
desc=_(b'repository %s') % self.origroot, | desc=_(b'repository %s') % self.origroot, | ||||
) | ) | ||||
self._lockref = weakref.ref(l) | self._lockref = weakref.ref(l) | ||||
return l | return l | ||||
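Both lock() and wlock() (below) return locks that work as context managers, so the documented ordering rule can be followed like this; a sketch assuming an open `repo`:

    # Acquire wlock before lock, per the docstrings, to avoid deadlocks.
    with repo.wlock(), repo.lock():
        with repo.transaction(b'example') as tr:
            pass  # modify store and working-directory state here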
def wlock(self, wait=True): | def wlock(self, wait=True): | ||||
'''Lock the non-store parts of the repository (everything under | """Lock the non-store parts of the repository (everything under | ||||
.hg except .hg/store) and return a weak reference to the lock. | .hg except .hg/store) and return a weak reference to the lock. | ||||
Use this before modifying files in .hg. | Use this before modifying files in .hg. | ||||
If both 'lock' and 'wlock' must be acquired, ensure you always acquire | If both 'lock' and 'wlock' must be acquired, ensure you always acquire | ||||
'wlock' first to avoid a dead-lock hazard.''' | 'wlock' first to avoid a dead-lock hazard.""" | ||||
l = self._wlockref and self._wlockref() | l = self._wlockref and self._wlockref() | ||||
if l is not None and l.held: | if l is not None and l.held: | ||||
l.lock() | l.lock() | ||||
return l | return l | ||||
# We do not need to check for non-waiting lock acquisition. Such | # We do not need to check for non-waiting lock acquisition. Such | ||||
# acquisition would not cause dead-lock as they would just fail. | # acquisition would not cause dead-lock as they would just fail. | ||||
if wait and ( | if wait and ( | ||||
return ret | return ret | ||||
@unfilteredmethod | @unfilteredmethod | ||||
def commitctx(self, ctx, error=False, origctx=None): | def commitctx(self, ctx, error=False, origctx=None): | ||||
return commit.commitctx(self, ctx, error=error, origctx=origctx) | return commit.commitctx(self, ctx, error=error, origctx=origctx) | ||||
@unfilteredmethod | @unfilteredmethod | ||||
def destroying(self): | def destroying(self): | ||||
'''Inform the repository that nodes are about to be destroyed. | """Inform the repository that nodes are about to be destroyed. | ||||
Intended for use by strip and rollback, so there's a common | Intended for use by strip and rollback, so there's a common | ||||
place for anything that has to be done before destroying history. | place for anything that has to be done before destroying history. | ||||
This is mostly useful for saving state that is in memory and waiting | This is mostly useful for saving state that is in memory and waiting | ||||
to be flushed when the current lock is released. Because a call to | to be flushed when the current lock is released. Because a call to | ||||
destroyed is imminent, the repo will be invalidated causing those | destroyed is imminent, the repo will be invalidated causing those | ||||
changes to stay in memory (waiting for the next unlock), or vanish | changes to stay in memory (waiting for the next unlock), or vanish | ||||
completely. | completely. | ||||
''' | """ | ||||
# When using the same lock to commit and strip, the phasecache is left | # When using the same lock to commit and strip, the phasecache is left | ||||
# dirty after committing. Then when we strip, the repo is invalidated, | # dirty after committing. Then when we strip, the repo is invalidated, | ||||
# causing those changes to disappear. | # causing those changes to disappear. | ||||
if '_phasecache' in vars(self): | if '_phasecache' in vars(self): | ||||
self._phasecache.write() | self._phasecache.write() | ||||
@unfilteredmethod | @unfilteredmethod | ||||
def destroyed(self): | def destroyed(self): | ||||
'''Inform the repository that nodes have been destroyed. | """Inform the repository that nodes have been destroyed. | ||||
Intended for use by strip and rollback, so there's a common | Intended for use by strip and rollback, so there's a common | ||||
place for anything that has to be done after destroying history. | place for anything that has to be done after destroying history. | ||||
''' | """ | ||||
# When one tries to: | # When one tries to: | ||||
# 1) destroy nodes thus calling this method (e.g. strip) | # 1) destroy nodes thus calling this method (e.g. strip) | ||||
# 2) use phasecache somewhere (e.g. commit) | # 2) use phasecache somewhere (e.g. commit) | ||||
# | # | ||||
# then 2) will fail because the phasecache contains nodes that were | # then 2) will fail because the phasecache contains nodes that were | ||||
# removed. We can either remove phasecache from the filecache, | # removed. We can either remove phasecache from the filecache, | ||||
# causing it to reload next time it is accessed, or simply filter | # causing it to reload next time it is accessed, or simply filter | ||||
# the removed nodes now and write the updated cache. | # the removed nodes now and write the updated cache. | ||||
headrevs = reversed(cl.headrevs()) | headrevs = reversed(cl.headrevs()) | ||||
return [cl.node(rev) for rev in headrevs] | return [cl.node(rev) for rev in headrevs] | ||||
heads = self.changelog.heads(start) | heads = self.changelog.heads(start) | ||||
# sort the output in rev descending order | # sort the output in rev descending order | ||||
return sorted(heads, key=self.changelog.rev, reverse=True) | return sorted(heads, key=self.changelog.rev, reverse=True) | ||||
def branchheads(self, branch=None, start=None, closed=False): | def branchheads(self, branch=None, start=None, closed=False): | ||||
'''return a (possibly filtered) list of heads for the given branch | """return a (possibly filtered) list of heads for the given branch | ||||
Heads are returned in topological order, from newest to oldest. | Heads are returned in topological order, from newest to oldest. | ||||
If branch is None, use the dirstate branch. | If branch is None, use the dirstate branch. | ||||
If start is not None, return only heads reachable from start. | If start is not None, return only heads reachable from start. | ||||
If closed is True, return heads that are marked as closed as well. | If closed is True, return heads that are marked as closed as well. | ||||
''' | """ | ||||
if branch is None: | if branch is None: | ||||
branch = self[None].branch() | branch = self[None].branch() | ||||
branches = self.branchmap() | branches = self.branchmap() | ||||
if not branches.hasbranch(branch): | if not branches.hasbranch(branch): | ||||
return [] | return [] | ||||
# the cache returns heads ordered lowest to highest | # the cache returns heads ordered lowest to highest | ||||
bheads = list(reversed(branches.branchheads(branch, closed=closed))) | bheads = list(reversed(branches.branchheads(branch, closed=closed))) | ||||
if start is not None: | if start is not None: | ||||
# requirement | # requirement | ||||
if ui.configbool(b'format', b'exp-share-safe'): | if ui.configbool(b'format', b'exp-share-safe'): | ||||
requirements.add(requirementsmod.SHARESAFE_REQUIREMENT) | requirements.add(requirementsmod.SHARESAFE_REQUIREMENT) | ||||
return requirements | return requirements | ||||
def checkrequirementscompat(ui, requirements): | def checkrequirementscompat(ui, requirements): | ||||
""" Checks compatibility of repository requirements enabled and disabled. | """Checks compatibility of repository requirements enabled and disabled. | ||||
Returns a set of requirements which need to be dropped because dependent | Returns a set of requirements which need to be dropped because dependent | ||||
requirements are not enabled. Also warns users about it. """ | requirements are not enabled. Also warns users about it.""" | ||||
dropped = set() | dropped = set() | ||||
if b'store' not in requirements: | if b'store' not in requirements: | ||||
if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements: | if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements: | ||||
ui.warn( | ui.warn( | ||||
_( | _( | ||||
b'ignoring enabled \'format.bookmarks-in-store\' config ' | b'ignoring enabled \'format.bookmarks-in-store\' config ' |
else: | else: | ||||
ui.debug(b"got lock after %d seconds\n" % l.delay) | ui.debug(b"got lock after %d seconds\n" % l.delay) | ||||
if l.acquirefn: | if l.acquirefn: | ||||
l.acquirefn() | l.acquirefn() | ||||
return l | return l | ||||
class lock(object): | class lock(object): | ||||
'''An advisory lock held by one process to control access to a set | """An advisory lock held by one process to control access to a set | ||||
of files. Non-cooperating processes or incorrectly written scripts | of files. Non-cooperating processes or incorrectly written scripts | ||||
can ignore Mercurial's locking scheme and stomp all over the | can ignore Mercurial's locking scheme and stomp all over the | ||||
repository, so don't do that. | repository, so don't do that. | ||||
Typically used via localrepository.lock() to lock the repository | Typically used via localrepository.lock() to lock the repository | ||||
store (.hg/store/) or localrepository.wlock() to lock everything | store (.hg/store/) or localrepository.wlock() to lock everything | ||||
else under .hg/.''' | else under .hg/.""" | ||||
# lock is symlink on platforms that support it, file on others. | # lock is symlink on platforms that support it, file on others. | ||||
# symlink is used because create of directory entry and contents | # symlink is used because create of directory entry and contents | ||||
# are atomic even over nfs. | # are atomic even over nfs. | ||||
# old-style lock: symlink to pid | # old-style lock: symlink to pid | ||||
# new-style lock: symlink to hostname:pid | # new-style lock: symlink to hostname:pid |
if obsfate: | if obsfate: | ||||
for obsfateline in obsfate: | for obsfateline in obsfate: | ||||
self.ui.write( | self.ui.write( | ||||
self._columns[b'obsolete'] % obsfateline, | self._columns[b'obsolete'] % obsfateline, | ||||
label=b'log.obsfate', | label=b'log.obsfate', | ||||
) | ) | ||||
def _exthook(self, ctx): | def _exthook(self, ctx): | ||||
'''empty method used by extensions as a hook point | """empty method used by extensions as a hook point""" | ||||
''' | |||||
def _showpatch(self, ctx, graphwidth=0): | def _showpatch(self, ctx, graphwidth=0): | ||||
if self._includestat: | if self._includestat: | ||||
self._differ.showdiff( | self._differ.showdiff( | ||||
self.ui, ctx, self._diffopts, graphwidth, stat=True | self.ui, ctx, self._diffopts, graphwidth, stat=True | ||||
) | ) | ||||
if self._includestat and self._includediff: | if self._includestat and self._includediff: | ||||
self.ui.write(b"\n") | self.ui.write(b"\n") | ||||
fm.data(diffstat=self.ui.popbuffer()) | fm.data(diffstat=self.ui.popbuffer()) | ||||
if self._includediff or b'diff' in datahint: | if self._includediff or b'diff' in datahint: | ||||
self.ui.pushbuffer() | self.ui.pushbuffer() | ||||
self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False) | self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False) | ||||
fm.data(diff=self.ui.popbuffer()) | fm.data(diff=self.ui.popbuffer()) | ||||
class changesettemplater(changesetprinter): | class changesettemplater(changesetprinter): | ||||
'''format changeset information. | """format changeset information. | ||||
Note: there are a variety of convenience functions to build a | Note: there are a variety of convenience functions to build a | ||||
changesettemplater for common cases. See functions such as: | changesettemplater for common cases. See functions such as: | ||||
maketemplater, changesetdisplayer, buildcommittemplate, or other | maketemplater, changesetdisplayer, buildcommittemplate, or other | ||||
functions that use changesettemplater. | functions that use changesettemplater. | ||||
''' | """ | ||||
# Arguments before "buffered" used to be positional. Consider not | # Arguments before "buffered" used to be positional. Consider not | ||||
# adding/removing arguments before "buffered" to not break callers. | # adding/removing arguments before "buffered" to not break callers. | ||||
def __init__( | def __init__( | ||||
self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False | self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False | ||||
): | ): | ||||
changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered) | changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered) | ||||
# tres is shared with _graphnodeformatter() | # tres is shared with _graphnodeformatter() |
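A hedged sketch of the maketemplater convenience function named in the docstring (`ui` and `repo` assumed; the template is illustrative):

    from mercurial import logcmdutil

    displayer = logcmdutil.maketemplater(ui, repo, b'{rev}:{node|short}\n')
    displayer.show(repo[b'tip'])
    displayer.close()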
pull or clone operation. | pull or clone operation. | ||||
localrepo is our local repository | localrepo is our local repository | ||||
remoterepo is the peer instance | remoterepo is the peer instance | ||||
""" | """ | ||||
remotepath = activepath(localrepo, remoterepo) | remotepath = activepath(localrepo, remoterepo) | ||||
with remoterepo.commandexecutor() as e: | with remoterepo.commandexecutor() as e: | ||||
bookmarks = e.callcommand( | bookmarks = e.callcommand( | ||||
b'listkeys', {b'namespace': b'bookmarks',} | b'listkeys', | ||||
{ | |||||
b'namespace': b'bookmarks', | |||||
}, | |||||
).result() | ).result() | ||||
# on a push, we don't want to keep obsolete heads since | # on a push, we don't want to keep obsolete heads since | ||||
# they won't show up as heads on the next pull, so we | # they won't show up as heads on the next pull, so we | ||||
# remove them here; otherwise we would require the user | # remove them here; otherwise we would require the user | ||||
# to issue a pull to refresh the storage | # to issue a pull to refresh the storage | ||||
bmap = {} | bmap = {} | ||||
repo = localrepo.unfiltered() | repo = localrepo.unfiltered() |
if pycompat.TYPE_CHECKING: | if pycompat.TYPE_CHECKING: | ||||
from typing import Any, List, Tuple, Union | from typing import Any, List, Tuple, Union | ||||
# keep pyflakes happy | # keep pyflakes happy | ||||
assert all((Any, List, Tuple, Union)) | assert all((Any, List, Tuple, Union)) | ||||
class STARTTLS(smtplib.SMTP): | class STARTTLS(smtplib.SMTP): | ||||
'''Derived class to verify the peer certificate for STARTTLS. | """Derived class to verify the peer certificate for STARTTLS. | ||||
This class allows passing any keyword arguments to SSL socket creation. | This class allows passing any keyword arguments to SSL socket creation. | ||||
''' | """ | ||||
def __init__(self, ui, host=None, **kwargs): | def __init__(self, ui, host=None, **kwargs): | ||||
smtplib.SMTP.__init__(self, **kwargs) | smtplib.SMTP.__init__(self, **kwargs) | ||||
self._ui = ui | self._ui = ui | ||||
self._host = host | self._host = host | ||||
def starttls(self, keyfile=None, certfile=None): | def starttls(self, keyfile=None, certfile=None): | ||||
if not self.has_extn("starttls"): | if not self.has_extn("starttls"): | ||||
self.helo_resp = None | self.helo_resp = None | ||||
self.ehlo_resp = None | self.ehlo_resp = None | ||||
self.esmtp_features = {} | self.esmtp_features = {} | ||||
self.does_esmtp = 0 | self.does_esmtp = 0 | ||||
return (resp, reply) | return (resp, reply) | ||||
class SMTPS(smtplib.SMTP): | class SMTPS(smtplib.SMTP): | ||||
'''Derived class to verify the peer certificate for SMTPS. | """Derived class to verify the peer certificate for SMTPS. | ||||
This class allows passing any keyword arguments to SSL socket creation. | This class allows passing any keyword arguments to SSL socket creation. | ||||
''' | """ | ||||
def __init__(self, ui, keyfile=None, certfile=None, host=None, **kwargs): | def __init__(self, ui, keyfile=None, certfile=None, host=None, **kwargs): | ||||
self.keyfile = keyfile | self.keyfile = keyfile | ||||
self.certfile = certfile | self.certfile = certfile | ||||
smtplib.SMTP.__init__(self, **kwargs) | smtplib.SMTP.__init__(self, **kwargs) | ||||
self._host = host | self._host = host | ||||
self.default_port = smtplib.SMTP_SSL_PORT | self.default_port = smtplib.SMTP_SSL_PORT | ||||
self._ui = ui | self._ui = ui | ||||
% (encoding.strtolocal(sender), encoding.strtolocal(date)) | % (encoding.strtolocal(sender), encoding.strtolocal(date)) | ||||
) | ) | ||||
fp.write(msg) | fp.write(msg) | ||||
fp.write(b'\n\n') | fp.write(b'\n\n') | ||||
fp.close() | fp.close() | ||||
def connect(ui, mbox=None): | def connect(ui, mbox=None): | ||||
'''make a mail connection. return a function to send mail. | """make a mail connection. return a function to send mail. | ||||
call as sendmail(sender, list-of-recipients, msg).''' | call as sendmail(sender, list-of-recipients, msg).""" | ||||
if mbox: | if mbox: | ||||
open(mbox, b'wb').close() | open(mbox, b'wb').close() | ||||
return lambda s, r, m: _mbox(mbox, s, r, m) | return lambda s, r, m: _mbox(mbox, s, r, m) | ||||
if ui.config(b'email', b'method') == b'smtp': | if ui.config(b'email', b'method') == b'smtp': | ||||
return _smtp(ui) | return _smtp(ui) | ||||
return lambda s, r, m: _sendmail(ui, s, r, m) | return lambda s, r, m: _sendmail(ui, s, r, m) | ||||
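A hedged usage sketch: whichever transport is configured, connect() hands back a uniform sendmail(sender, recipients, msg) callable (`ui` assumed; addresses and message are illustrative):

    from mercurial import mail

    sendmail = mail.connect(ui)
    sendmail(
        b'me@example.com',
        [b'you@example.com'],
        b'Subject: hello\n\nbody\n',
    )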
# "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1" | # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1" | ||||
if cs.startswith("iso") and not cs.startswith("iso-"): | if cs.startswith("iso") and not cs.startswith("iso-"): | ||||
return "iso-" + cs[3:] | return "iso-" + cs[3:] | ||||
return cs | return cs | ||||
def mimetextpatch(s, subtype='plain', display=False): | def mimetextpatch(s, subtype='plain', display=False): | ||||
# type: (bytes, str, bool) -> email.message.Message | # type: (bytes, str, bool) -> email.message.Message | ||||
'''Return MIME message suitable for a patch. | """Return MIME message suitable for a patch. | ||||
Charset will be detected by first trying to decode as us-ascii, then utf-8, | Charset will be detected by first trying to decode as us-ascii, then utf-8, | ||||
and finally the global encodings. If all those fail, fall back to | and finally the global encodings. If all those fail, fall back to | ||||
ISO-8859-1, an encoding that allows all byte sequences. | ISO-8859-1, an encoding that allows all byte sequences. | ||||
Transfer encodings will be used if necessary.''' | Transfer encodings will be used if necessary.""" | ||||
cs = [ | cs = [ | ||||
'us-ascii', | 'us-ascii', | ||||
'utf-8', | 'utf-8', | ||||
pycompat.sysstr(encoding.encoding), | pycompat.sysstr(encoding.encoding), | ||||
pycompat.sysstr(encoding.fallbackencoding), | pycompat.sysstr(encoding.fallbackencoding), | ||||
] | ] | ||||
if display: | if display: | ||||
cs = ['us-ascii'] | cs = ['us-ascii'] | ||||
for charset in cs: | for charset in cs: | ||||
try: | try: | ||||
s.decode(charset) | s.decode(charset) | ||||
return mimetextqp(s, subtype, codec2iana(charset)) | return mimetextqp(s, subtype, codec2iana(charset)) | ||||
except UnicodeDecodeError: | except UnicodeDecodeError: | ||||
pass | pass | ||||
return mimetextqp(s, subtype, "iso-8859-1") | return mimetextqp(s, subtype, "iso-8859-1") | ||||
def mimetextqp(body, subtype, charset): | def mimetextqp(body, subtype, charset): | ||||
# type: (bytes, str, str) -> email.message.Message | # type: (bytes, str, str) -> email.message.Message | ||||
'''Return MIME message. | """Return MIME message. | ||||
Quoted-printable transfer encoding will be used if necessary. | Quoted-printable transfer encoding will be used if necessary. | ||||
''' | """ | ||||
cs = email.charset.Charset(charset) | cs = email.charset.Charset(charset) | ||||
msg = email.message.Message() | msg = email.message.Message() | ||||
msg.set_type('text/' + subtype) | msg.set_type('text/' + subtype) | ||||
for line in body.splitlines(): | for line in body.splitlines(): | ||||
if len(line) > 950: | if len(line) > 950: | ||||
cs.body_encoding = email.charset.QP | cs.body_encoding = email.charset.QP | ||||
break | break | ||||
for cs in fallbacks: # find unique charsets while keeping order | for cs in fallbacks: # find unique charsets while keeping order | ||||
if cs not in charsets: | if cs not in charsets: | ||||
charsets.append(cs) | charsets.append(cs) | ||||
return [cs for cs in charsets if not cs.endswith('ascii')] | return [cs for cs in charsets if not cs.endswith('ascii')] | ||||
def _encode(ui, s, charsets): | def _encode(ui, s, charsets): | ||||
# type: (Any, bytes, List[str]) -> Tuple[bytes, str] | # type: (Any, bytes, List[str]) -> Tuple[bytes, str] | ||||
'''Returns (converted) string, charset tuple. | """Returns (converted) string, charset tuple. | ||||
Finds out best charset by cycling through sendcharsets in descending | Finds out best charset by cycling through sendcharsets in descending | ||||
order. Tries both encoding and fallbackencoding for input. Only as | order. Tries both encoding and fallbackencoding for input. Only as | ||||
last resort, send as is in fake ascii. | last resort, send as is in fake ascii. | ||||
Caveat: Do not use for mail parts containing patches!''' | Caveat: Do not use for mail parts containing patches!""" | ||||
sendcharsets = charsets or _charsets(ui) | sendcharsets = charsets or _charsets(ui) | ||||
if not isinstance(s, bytes): | if not isinstance(s, bytes): | ||||
# We have unicode data, which we need to try and encode to | # We have unicode data, which we need to try and encode to | ||||
# some reasonable-ish encoding. Try the encodings the user | # some reasonable-ish encoding. Try the encodings the user | ||||
# wants, and fall back to garbage-in-ascii. | # wants, and fall back to garbage-in-ascii. | ||||
for ocs in sendcharsets: | for ocs in sendcharsets: | ||||
try: | try: | ||||
return s.encode(ocs), ocs | return s.encode(ocs), ocs | ||||
if display or not address: | if display or not address: | ||||
return encoding.strfromlocal(address or b'') | return encoding.strfromlocal(address or b'') | ||||
name, addr = email.utils.parseaddr(encoding.strfromlocal(address)) | name, addr = email.utils.parseaddr(encoding.strfromlocal(address)) | ||||
return _addressencode(ui, name, addr, charsets) | return _addressencode(ui, name, addr, charsets) | ||||
def addrlistencode(ui, addrs, charsets=None, display=False): | def addrlistencode(ui, addrs, charsets=None, display=False): | ||||
# type: (Any, List[bytes], List[str], bool) -> List[str] | # type: (Any, List[bytes], List[str], bool) -> List[str] | ||||
'''Turns a list of addresses into a list of RFC-2047 compliant headers. | """Turns a list of addresses into a list of RFC-2047 compliant headers. | ||||
A single element of the input list may contain multiple addresses, but the | A single element of the input list may contain multiple addresses, but the | ||||
output always has one address per item.''' | output always has one address per item.""" | ||||
straddrs = [] | straddrs = [] | ||||
for a in addrs: | for a in addrs: | ||||
assert isinstance(a, bytes), '%r unexpectedly not a bytestr' % a | assert isinstance(a, bytes), '%r unexpectedly not a bytestr' % a | ||||
straddrs.append(encoding.strfromlocal(a)) | straddrs.append(encoding.strfromlocal(a)) | ||||
if display: | if display: | ||||
return [a.strip() for a in straddrs if a.strip()] | return [a.strip() for a in straddrs if a.strip()] | ||||
result = [] | result = [] | ||||
for name, addr in email.utils.getaddresses(straddrs): | for name, addr in email.utils.getaddresses(straddrs): | ||||
if name or addr: | if name or addr: | ||||
r = _addressencode(ui, name, addr, charsets) | r = _addressencode(ui, name, addr, charsets) | ||||
result.append(r) | result.append(r) | ||||
return result | return result | ||||
def mimeencode(ui, s, charsets=None, display=False): | def mimeencode(ui, s, charsets=None, display=False): | ||||
# type: (Any, bytes, List[str], bool) -> email.message.Message | # type: (Any, bytes, List[str], bool) -> email.message.Message | ||||
'''creates mime text object, encodes it if needed, and sets | """creates mime text object, encodes it if needed, and sets | ||||
charset and transfer-encoding accordingly.''' | charset and transfer-encoding accordingly.""" | ||||
cs = 'us-ascii' | cs = 'us-ascii' | ||||
if not display: | if not display: | ||||
s, cs = _encode(ui, s, charsets) | s, cs = _encode(ui, s, charsets) | ||||
return mimetextqp(s, 'plain', cs) | return mimetextqp(s, 'plain', cs) | ||||
if pycompat.ispy3: | if pycompat.ispy3: | ||||
def dirs(self): | def dirs(self): | ||||
return self._dirs | return self._dirs | ||||
def hasdir(self, dir): | def hasdir(self, dir): | ||||
return dir in self._dirs | return dir in self._dirs | ||||
def _filesfastpath(self, match): | def _filesfastpath(self, match): | ||||
'''Checks whether we can correctly and quickly iterate over matcher | """Checks whether we can correctly and quickly iterate over matcher | ||||
files instead of over manifest files.''' | files instead of over manifest files.""" | ||||
files = match.files() | files = match.files() | ||||
return len(files) < 100 and ( | return len(files) < 100 and ( | ||||
match.isexact() | match.isexact() | ||||
or (match.prefix() and all(fn in self for fn in files)) | or (match.prefix() and all(fn in self for fn in files)) | ||||
) | ) | ||||
def walk(self, match): | def walk(self, match): | ||||
'''Generates matching file names. | """Generates matching file names. | ||||
Equivalent to manifest.matches(match).iterkeys(), but without creating | Equivalent to manifest.matches(match).iterkeys(), but without creating | ||||
an entirely new manifest. | an entirely new manifest. | ||||
It also reports nonexistent files by marking them bad with match.bad(). | It also reports nonexistent files by marking them bad with match.bad(). | ||||
''' | """ | ||||
if match.always(): | if match.always(): | ||||
for f in iter(self): | for f in iter(self): | ||||
yield f | yield f | ||||
return | return | ||||
fset = set(match.files()) | fset = set(match.files()) | ||||
# avoid the entire walk if we're only looking for specific files | # avoid the entire walk if we're only looking for specific files | ||||
                     m._lm[fn] = lm[fn]
             return m

         m = manifestdict()
         m._lm = self._lm.filtercopy(match)
         return m

     def diff(self, m2, match=None, clean=False):
-        '''Finds changes between the current manifest and m2.
+        """Finds changes between the current manifest and m2.

         Args:
           m2: the manifest to which this manifest should be compared.
           clean: if true, include files unchanged between these manifests
                  with a None value in the returned dictionary.

         The result is returned as a dict with filename as key and
         values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
         nodeid in the current/other manifest and fl1/fl2 is the flag
         in the current/other manifest. Where the file does not exist,
         the nodeid will be None and the flags will be the empty
         string.
-        '''
+        """
         if match:
             m1 = self._matches(match)
             m2 = m2._matches(match)
             return m1.diff(m2, clean=clean)
         return self._lm.diff(m2._lm, clean)

     def setflag(self, key, flag):
         if flag not in _manifestflags:

[...]

         deltatext = mdiff.textdiff(
             util.buffer(base), util.buffer(arraytext)
         )
         return arraytext, deltatext


 def _msearch(m, s, lo=0, hi=None):
-    '''return a tuple (start, end) that says where to find s within m.
+    """return a tuple (start, end) that says where to find s within m.

     If the string is found m[start:end] are the line containing
     that string. If start == end the string was not found and
     they indicate the proper sorted insertion point.

     m should be a buffer, a memoryview or a byte string.
-    s is a byte string'''
+    s is a byte string"""

     def advance(i, c):
         while i < lenm and m[i : i + 1] != c:
             i += 1
         return i

     if not s:
         return (lo, lo)
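_msearch's contract (a (start, end) slice of the line containing s, or an empty slice at the sorted insertion point) is the classic bisect-over-newline-delimited-entries pattern. A simplified pure-Python sketch of the same idea, operating on a list of manifest-style lines rather than one flat buffer:

import bisect

def msearch_lines(lines, key):
    """lines: sorted list of b'name\\0...' entries; key: a b'name\\0' prefix.

    Returns (start, end) indices of the matching line, or (i, i) at the
    sorted insertion point when key is absent.
    """
    i = bisect.bisect_left(lines, key)
    if i < len(lines) and lines[i].startswith(key):
        return i, i + 1
    return i, i

entries = [b'a\x00n1\n', b'b\x00n2\n', b'd\x00n3\n']
assert msearch_lines(entries, b'b\x00') == (1, 2)
assert msearch_lines(entries, b'c\x00') == (2, 2)  # insertion point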
                 hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty,
                 id(self),
             )
         )

     def dir(self):
-        '''The directory that this tree manifest represents, including a
-        trailing '/'. Empty string for the repo root directory.'''
+        """The directory that this tree manifest represents, including a
+        trailing '/'. Empty string for the repo root directory."""
         return self._dir

     def node(self):
-        '''This node of this instance. nullid for unsaved instances. Should
+        """This node of this instance. nullid for unsaved instances. Should
         be updated when the instance is read or written from a revlog.
-        '''
+        """
         assert not self._dirty
         return self._node

     def setnode(self, node):
         self._node = node
         self._dirty = False

     def iterentries(self):
             self._loadlazy(topdir)

             if topdir in self._dirs:
                 return self._dirs[topdir].hasdir(subdir)

             return False

         dirslash = dir + b'/'
         return dirslash in self._dirs or dirslash in self._lazydirs

     def walk(self, match):
-        '''Generates matching file names.
+        """Generates matching file names.

         It also reports nonexistent files by marking them bad with match.bad().
-        '''
+        """
         if match.always():
             for f in iter(self):
                 yield f
             return

         fset = set(match.files())

         for fn in self._walk(match):
                 if match(fullp):
                     yield fullp
             else:
                 if not visit or p[:-1] in visit:
                     for f in self._dirs[p]._walk(match):
                         yield f

     def _matches(self, match):
-        '''recursively generate a new manifest filtered by the match argument.
-        '''
+        """recursively generate a new manifest filtered by the match argument."""
         if match.always():
             return self.copy()

         return self._matches_inner(match)

     def _matches_inner(self, match):
         if match.always():
             return self.copy()

[...]

         if not ret._isempty():
             ret._dirty = True
         return ret

     def fastdelta(self, base, changes):
         raise FastdeltaUnavailable()

     def diff(self, m2, match=None, clean=False):
-        '''Finds changes between the current manifest and m2.
+        """Finds changes between the current manifest and m2.

         Args:
           m2: the manifest to which this manifest should be compared.
           clean: if true, include files unchanged between these manifests
                  with a None value in the returned dictionary.

         The result is returned as a dict with filename as key and
         values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
         nodeid in the current/other manifest and fl1/fl2 is the flag
         in the current/other manifest. Where the file does not exist,
         the nodeid will be None and the flags will be the empty
         string.
-        '''
+        """
         if match and not match.always():
             m1 = self._matches(match)
             m2 = m2._matches(match)
             return m1.diff(m2, clean=clean)
         result = {}
         emptytree = treemanifest()

         def _iterativediff(t1, t2, stack):
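The diff() docstring above fixes the return shape: {filename: ((n1, fl1), (n2, fl2))}, with None/b'' standing in for a missing side. A tiny illustration of that shape with plain dicts (hypothetical nodeids, not hg API):

# Two toy manifests: filename -> (nodeid, flags).
m1 = {b'a': (b'n1', b''), b'x': (b'n7', b'x')}
m2 = {b'a': (b'n2', b''), b'b': (b'n3', b'')}

def toydiff(m1, m2):
    d = {}
    for f in set(m1) | set(m2):
        e1 = m1.get(f, (None, b''))  # missing side: nodeid None, flags b''
        e2 = m2.get(f, (None, b''))
        if e1 != e2:
            d[f] = (e1, e2)
    return d

# {b'a': ((b'n1', b''), (b'n2', b'')),
#  b'x': ((b'n7', b'x'), (None, b'')),
#  b'b': ((None, b''), (b'n3', b''))}
print(toydiff(m1, m2))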
 class FastdeltaUnavailable(Exception):
     """Exception raised when fastdelta isn't usable on a manifest."""


 @interfaceutil.implementer(repository.imanifeststorage)
 class manifestrevlog(object):
-    '''A revlog that stores manifest texts. This is responsible for caching the
+    """A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
-    '''
+    """

     def __init__(
         self,
         opener,
         tree=b'',
         dirlogcache=None,
         indexfile=None,
         treemanifest=False,
         else:
             text = store.revision(self._node)
             arraytext = bytearray(text)
             store.fulltextcache[self._node] = arraytext
             self._data = manifestdict(text)

         return self._data

     def readfast(self, shallow=False):
-        '''Calls either readdelta or read, based on which would be less work.
+        """Calls either readdelta or read, based on which would be less work.
         readdelta is called if the delta is against the p1, and therefore can be
         read quickly.

         If `shallow` is True, nothing changes since this is a flat manifest.
-        '''
+        """
         store = self._storage()
         r = store.rev(self._node)
         deltaparent = store.deltaparent(r)
         if deltaparent != nullrev and deltaparent in store.parentrevs(r):
             return self.readdelta()
         return self.read()

     def readdelta(self, shallow=False):
-        '''Returns a manifest containing just the entries that are present
+        """Returns a manifest containing just the entries that are present
         in this manifest, but not in its p1 manifest. This is efficient to read
         if the revlog delta is already p1.

         Changing the value of `shallow` has no effect on flat manifests.
-        '''
+        """
         store = self._storage()
         r = store.rev(self._node)
         d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
         return manifestdict(d)

     def find(self, key):
         return self.read().find(key)
         memmf._treemanifest = self.read().copy()
         return memmf

     @propertycache
     def parents(self):
         return self._storage().parents(self._node)

     def readdelta(self, shallow=False):
-        '''Returns a manifest containing just the entries that are present
+        """Returns a manifest containing just the entries that are present
         in this manifest, but not in its p1 manifest. This is efficient to read
         if the revlog delta is already p1.

         If `shallow` is True, this will read the delta for this directory,
         without recursively reading subdirectory manifests. Instead, any
         subdirectory entry will be reported as it appears in the manifest, i.e.
         the subdirectory will be reported among files and distinguished only by
         its 't' flag.
-        '''
+        """
         store = self._storage()
         if shallow:
             r = store.rev(self._node)
             d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
             return manifestdict(d)
         else:
             # Need to perform a slow delta
             r0 = store.deltaparent(store.rev(self._node))
             m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
             m1 = self.read()
             md = treemanifest(dir=self._dir)
             for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                 if n1:
                     md[f] = n1
                     if fl1:
                         md.setflag(f, fl1)
             return md

     def readfast(self, shallow=False):
-        '''Calls either readdelta or read, based on which would be less work.
+        """Calls either readdelta or read, based on which would be less work.
         readdelta is called if the delta is against the p1, and therefore can be
         read quickly.

         If `shallow` is True, it only returns the entries from this manifest,
         and not any submanifests.
-        '''
+        """
         store = self._storage()
         r = store.rev(self._node)
         deltaparent = store.deltaparent(r)
         if deltaparent != nullrev and deltaparent in store.parentrevs(r):
             return self.readdelta(shallow=shallow)

         if shallow:
             return manifestdict(store.revision(self._node))
     b'rootfilesin',
 )
 cwdrelativepatternkinds = (b'relpath', b'glob')

 propertycache = util.propertycache


 def _rematcher(regex):
-    '''compile the regexp with the best available regexp engine and return a
-    matcher function'''
+    """compile the regexp with the best available regexp engine and return a
+    matcher function"""
     m = util.re.compile(regex)
     try:
         # slightly faster, provided by facebook's re2 bindings
         return m.test_match
     except AttributeError:
         return m.match
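_rematcher's try/except is the standard probe-for-re2 fallback: compile once, prefer the re2 binding's test_match when present, else fall back to stdlib match. A sketch of the same pattern against plain re (test_match is the re2-binding name used above; on stdlib re it raises AttributeError, which is the point):

import re

def rematcher(regex):
    # compile with whatever engine is available; stdlib re here
    m = re.compile(regex)
    try:
        return m.test_match  # only exists on facebook's re2 bindings
    except AttributeError:
        return m.match

match = rematcher(rb'foo/.*\.py$')
assert match(rb'foo/bar.py')
assert not match(rb'baz.py')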
             matchers.append(pm)
             continue
         other.append((kind, pat, source))
     return matchers, other


 def _expandsubinclude(kindpats, root):
-    '''Returns the list of subinclude matcher args and the kindpats without the
-    subincludes in it.'''
+    """Returns the list of subinclude matcher args and the kindpats without the
+    subincludes in it."""
     relmatchers = []
     other = []
     for kind, pat, source in kindpats:
         if kind == b'subinclude':
             sourceroot = pathutil.dirname(util.normpath(source))
             pat = util.pconvert(pat)
             path = pathutil.join(sourceroot, pat)

             newroot = pathutil.dirname(path)
             matcherargs = (newroot, b'', [], [b'include:%s' % path])

             prefix = pathutil.canonpath(root, root, newroot)
             if prefix:
                 prefix += b'/'
             relmatchers.append((prefix, matcherargs))
         else:
             other.append((kind, pat, source))

     return relmatchers, other


 def _kindpatsalwaysmatch(kindpats):
-    """"Checks whether the kindspats match everything, as e.g.
+    """ "Checks whether the kindspats match everything, as e.g.
     'relpath:.' does.
     """
     for kind, pat, source in kindpats:
         if pat != b'' or kind not in [b'relpath', b'glob']:
             return False
     return True
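In other words, a kindpats list matches everything only when every entry is an empty relpath/glob pattern, which is what e.g. 'hg status .' at the repo root produces. A standalone restatement of the check above, with example inputs:

def kindpatsalwaysmatch(kindpats):
    for kind, pat, source in kindpats:
        if pat != b'' or kind not in (b'relpath', b'glob'):
            return False
    return True

assert kindpatsalwaysmatch([(b'relpath', b'', b'')])       # 'relpath:.'
assert not kindpatsalwaysmatch([(b'glob', b'*.py', b'')])  # a real pattern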
 def _buildkindpatsmatcher(
-    matchercls, root, cwd, kindpats, ctx=None, listsubrepos=False, badfn=None,
+    matchercls,
+    root,
+    cwd,
+    kindpats,
+    ctx=None,
+    listsubrepos=False,
+    badfn=None,
 ):
     matchers = []
     fms, kindpats = _expandsets(
-        cwd, kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn,
+        cwd,
+        kindpats,
+        ctx=ctx,
+        listsubrepos=listsubrepos,
+        badfn=badfn,
     )
     if kindpats:
         m = matchercls(root, kindpats, badfn=badfn)
         matchers.append(m)
     if fms:
         matchers.extend(fms)
     if not matchers:
         return nevermatcher(badfn=badfn)
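The one-argument-per-line explosion in these two hunks is black 20.8b1's "magic trailing comma": any call or collection that already ends with a trailing comma is always exploded, one element per line, which is the "dramatically improved collection-splitting logic" mentioned in the summary. A quick way to see it (black.format_str and FileMode are black's public API):

import black

exploded = black.format_str("f(a, b,)\n", mode=black.FileMode())
collapsed = black.format_str("f(a, b)\n", mode=black.FileMode())

print(exploded)   # f(\n    a,\n    b,\n)
print(collapsed)  # f(a, b)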
     one.
     """
     m = copy.copy(match)
     m.bad = badfn
     return m


 def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
-    '''Convert 'kind:pat' from the patterns list to tuples with kind and
-    normalized and rooted patterns and with listfiles expanded.'''
+    """Convert 'kind:pat' from the patterns list to tuples with kind and
+    normalized and rooted patterns and with listfiles expanded."""
     kindpats = []
     for kind, pat in [_patsplit(p, default) for p in patterns]:
         if kind in cwdrelativepatternkinds:
             pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
         elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
             pat = util.normpath(pat)
         elif kind in (b'listfile', b'listfile0'):
             try:
             self.bad = badfn

     def __call__(self, fn):
         return self.matchfn(fn)

     # Callbacks related to how the matcher is used by dirstate.walk.
     # Subscribers to these events must monkeypatch the matcher object.
     def bad(self, f, msg):
-        '''Callback from dirstate.walk for each explicit file that can't be
-        found/accessed, with an error message.'''
+        """Callback from dirstate.walk for each explicit file that can't be
+        found/accessed, with an error message."""

     # If a traversedir is set, it will be called when a directory discovered
     # by recursive traversal is visited.
     traversedir = None

     @propertycache
     def _files(self):
         return []

     def files(self):
-        '''Explicitly listed files or patterns or roots:
+        """Explicitly listed files or patterns or roots:
         if no patterns or .always(): empty list,
         if exact: list exact files,
         if not .anypats(): list all files and dirs,
-        else: optimal roots'''
+        else: optimal roots"""
         return self._files

     @propertycache
     def _fileset(self):
         return set(self._files)

     def exact(self, f):
         '''Returns True if f is in .files().'''
         return f in self._fileset

     def matchfn(self, f):
         return False

     def visitdir(self, dir):
-        '''Decides whether a directory should be visited based on whether it
+        """Decides whether a directory should be visited based on whether it
         has potential matches in it or one of its subdirectories. This is
         based on the match's primary, included, and excluded patterns.

         Returns the string 'all' if the given directory and all subdirectories
         should be visited. Otherwise returns True or False indicating whether
         the given directory should be visited.
-        '''
+        """
         return True

     def visitchildrenset(self, dir):
-        '''Decides whether a directory should be visited based on whether it
+        """Decides whether a directory should be visited based on whether it
         has potential matches in it or one of its subdirectories, and
         potentially lists which subdirectories of that directory should be
         visited. This is based on the match's primary, included, and excluded
         patterns.

         This function is very similar to 'visitdir', and the following mapping
         can be applied:

[...]

         Most matchers do not know if they're representing files or
         directories. They see ['path:dir/f'] and don't know whether 'f' is a
         file or a directory, so visitchildrenset('dir') for most matchers will
         return {'f'}, but if the matcher knows it's a file (like exactmatcher
         does), it may return 'this'. Do not rely on the return being a set
         indicating that there are no files in this dir to investigate (or
         equivalently that if there are files to investigate in 'dir' that it
         will always return 'this').
-        '''
+        """
         return b'this'

     def always(self):
-        '''Matcher will match everything and .files() will be empty --
-        optimization might be possible.'''
+        """Matcher will match everything and .files() will be empty --
+        optimization might be possible."""
         return False

     def isexact(self):
-        '''Matcher will match exactly the list of files in .files() --
-        optimization might be possible.'''
+        """Matcher will match exactly the list of files in .files() --
+        optimization might be possible."""
         return False

     def prefix(self):
-        '''Matcher will match the paths in .files() recursively --
-        optimization might be possible.'''
+        """Matcher will match the paths in .files() recursively --
+        optimization might be possible."""
         return False

     def anypats(self):
-        '''None of .always(), .isexact(), and .prefix() is true --
-        optimizations will be difficult.'''
+        """None of .always(), .isexact(), and .prefix() is true --
+        optimizations will be difficult."""
         return not self.always() and not self.isexact() and not self.prefix()


 class alwaysmatcher(basematcher):
     '''Matches everything.'''

     def __init__(self, badfn=None):
         super(alwaysmatcher, self).__init__(badfn)

[...]

         return set()
     @encoding.strmethod
     def __repr__(self):
         return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)


 class exactmatcher(basematcher):
-    r'''Matches the input files exactly. They are interpreted as paths, not
+    r"""Matches the input files exactly. They are interpreted as paths, not
     patterns (so no kind-prefixes).

     >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
     >>> m(b'a.txt')
     True
     >>> m(b'b.txt')
     False

     Input files that would be matched are exactly those returned by .files()
     >>> m.files()
     ['a.txt', 're:.*\\.c$']

     So pattern 're:.*\.c$' is not considered as a regex, but as a file name
     >>> m(b'main.c')
     False
     >>> m(br're:.*\.c$')
     True
-    '''
+    """

     def __init__(self, files, badfn=None):
         super(exactmatcher, self).__init__(badfn)

         if isinstance(files, list):
             self._files = files
         else:
             self._files = list(files)

[...]

         return True
     @encoding.strmethod
     def __repr__(self):
         return b'<exactmatcher files=%r>' % self._files


 class differencematcher(basematcher):
-    '''Composes two matchers by matching if the first matches and the second
+    """Composes two matchers by matching if the first matches and the second
     does not.

     The second matcher's non-matching-attributes (bad, traversedir) are ignored.
-    '''
+    """

     def __init__(self, m1, m2):
         super(differencematcher, self).__init__()
         self._m1 = m1
         self._m2 = m2
         self.bad = m1.bad
         self.traversedir = m1.traversedir

[...]

         return self._m1.isexact()

     @encoding.strmethod
     def __repr__(self):
         return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)


 def intersectmatchers(m1, m2):
-    '''Composes two matchers by matching if both of them match.
+    """Composes two matchers by matching if both of them match.

     The second matcher's non-matching-attributes (bad, traversedir) are ignored.
-    '''
+    """
     if m1 is None or m2 is None:
         return m1 or m2
     if m1.always():
         m = copy.copy(m2)
         # TODO: Consider encapsulating these things in a class so there's only
         # one thing to copy from m1.
         m.bad = m1.bad
         m.traversedir = m1.traversedir

[...]

         return r
     @encoding.strmethod
     def __repr__(self):
         return b'<unionmatcher matchers=%r>' % self._matchers


 def patkind(pattern, default=None):
-    r'''If pattern is 'kind:pat' with a known kind, return kind.
+    r"""If pattern is 'kind:pat' with a known kind, return kind.

     >>> patkind(br're:.*\.c$')
     're'
     >>> patkind(b'glob:*.c')
     'glob'
     >>> patkind(b'relpath:test.py')
     'relpath'
     >>> patkind(b'main.py')
     >>> patkind(b'main.py', default=b're')
     're'
-    '''
+    """
     return _patsplit(pattern, default)[0]


 def _patsplit(pattern, default):
     """Split a string into the optional pattern kind prefix and the actual
     pattern."""
     if b':' in pattern:
         kind, pat = pattern.split(b':', 1)
         if kind in allpatternkinds:
             return kind, pat
     return default, pattern
 def _globre(pat):
-    r'''Convert an extended glob string to a regexp string.
+    r"""Convert an extended glob string to a regexp string.

     >>> from . import pycompat
     >>> def bprint(s):
     ...     print(pycompat.sysstr(s))
     >>> bprint(_globre(br'?'))
     .
     >>> bprint(_globre(br'*'))
     [^/]*
     >>> bprint(_globre(br'**'))
     .*
     >>> bprint(_globre(br'**/a'))
     (?:.*/)?a
     >>> bprint(_globre(br'a/**/b'))
     a/(?:.*/)?b
     >>> bprint(_globre(br'[a*?!^][^b][!c]'))
     [a*?!^][\^b][^c]
     >>> bprint(_globre(br'{a,b}'))
     (?:a|b)
     >>> bprint(_globre(br'.\*\?'))
     \.\*\?
-    '''
+    """
     i, n = 0, len(pat)
     res = b''
     group = 0
     escape = util.stringutil.regexbytesescapemap.get

     def peek():
         return i < n and pat[i : i + 1]

[...]

             else:
                 res += escape(c, c)
         else:
             res += escape(c, c)
     return res
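Per the doctests, _globre only produces the regexp body; callers append a suffix and compile. A usage sketch with stdlib re, hand-inlining the a/**/b translation shown in the doctest above (so this snippet doesn't need to import mercurial):

import re

# _globre(br'a/**/b') -> br'a/(?:.*/)?b' per the doctest; match.py then
# appends a suffix such as b'$' before compiling.
pat = re.compile(rb'a/(?:.*/)?b' + rb'$')

assert pat.match(rb'a/b')
assert pat.match(rb'a/x/y/b')
assert not pat.match(rb'a/x/y/bc')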
 def _regex(kind, pat, globsuffix):
-    '''Convert a (normalized) pattern of any kind into a
+    """Convert a (normalized) pattern of any kind into a
     regular expression.
-    globsuffix is appended to the regexp of globs.'''
+    globsuffix is appended to the regexp of globs."""
     if not pat and kind in (b'glob', b'relpath'):
         return b''
     if kind == b're':
         return pat
     if kind in (b'path', b'relpath'):
         if pat == b'.':
             return b''
         return util.stringutil.reescape(pat) + b'(?:/|$)'

[...]

             return pat
         return b'.*' + pat
     if kind in (b'glob', b'rootglob'):
         return _globre(pat) + globsuffix
     raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))


 def _buildmatch(kindpats, globsuffix, root):
-    '''Return regexp string and a matcher function for kindpats.
-    globsuffix is appended to the regexp of globs.'''
+    """Return regexp string and a matcher function for kindpats.
+    globsuffix is appended to the regexp of globs."""
     matchfuncs = []

     subincludes, kindpats = _expandsubinclude(kindpats, root)
     if subincludes:
         submatchers = {}

         def matchsubinclude(f):
             for prefix, matcherargs in subincludes:
_(b"%s: invalid pattern (%s): %s") % (s, k, p) | _(b"%s: invalid pattern (%s): %s") % (s, k, p) | ||||
) | ) | ||||
else: | else: | ||||
raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p)) | raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p)) | ||||
raise error.Abort(_(b"invalid pattern")) | raise error.Abort(_(b"invalid pattern")) | ||||
def _patternrootsanddirs(kindpats): | def _patternrootsanddirs(kindpats): | ||||
'''Returns roots and directories corresponding to each pattern. | """Returns roots and directories corresponding to each pattern. | ||||
This calculates the roots and directories exactly matching the patterns and | This calculates the roots and directories exactly matching the patterns and | ||||
returns a tuple of (roots, dirs) for each. It does not return other | returns a tuple of (roots, dirs) for each. It does not return other | ||||
directories which may also need to be considered, like the parent | directories which may also need to be considered, like the parent | ||||
directories. | directories. | ||||
''' | """ | ||||
r = [] | r = [] | ||||
d = [] | d = [] | ||||
for kind, pat, source in kindpats: | for kind, pat, source in kindpats: | ||||
if kind in (b'glob', b'rootglob'): # find the non-glob prefix | if kind in (b'glob', b'rootglob'): # find the non-glob prefix | ||||
root = [] | root = [] | ||||
for p in pat.split(b'/'): | for p in pat.split(b'/'): | ||||
if b'[' in p or b'{' in p or b'*' in p or b'?' in p: | if b'[' in p or b'{' in p or b'*' in p or b'?' in p: | ||||
break | break | ||||
 def _roots(kindpats):
     '''Returns root directories to match recursively from the given patterns.'''
     roots, dirs = _patternrootsanddirs(kindpats)
     return roots


 def _rootsdirsandparents(kindpats):
-    '''Returns roots and exact directories from patterns.
+    """Returns roots and exact directories from patterns.

     `roots` are directories to match recursively, `dirs` should
     be matched non-recursively, and `parents` are the implicitly required
     directories to walk to items in either roots or dirs.

     Returns a tuple of (roots, dirs, parents).

     >>> r = _rootsdirsandparents(
     ...     (b'path', b'', b'')])
     >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
     (['r', 'p/p', ''], []) ['', 'p']
     >>> r = _rootsdirsandparents(
     ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
     ...      (b'relre', b'rr', b'')])
     >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
     (['', '', ''], []) ['']
-    '''
+    """
     r, d = _patternrootsanddirs(kindpats)

     p = set()
     # Add the parents as non-recursive/exact directories, since they must be
     # scanned to get to either the roots or the other exact directories.
     p.update(pathutil.dirs(d))
     p.update(pathutil.dirs(r))

     # FIXME: all uses of this function convert these to sets, do so before
     # returning.
     # FIXME: all uses of this function do not need anything in 'roots' and
     # 'dirs' to also be in 'parents', consider removing them before returning.
     return r, d, p
 def _explicitfiles(kindpats):
-    '''Returns the potential explicit filenames from the patterns.
+    """Returns the potential explicit filenames from the patterns.

     >>> _explicitfiles([(b'path', b'foo/bar', b'')])
     ['foo/bar']
     >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
     []
-    '''
+    """
     # Keep only the pattern kinds where one can specify filenames (vs only
     # directory names).
     filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
     return _roots(filable)


 def _prefix(kindpats):
     '''Whether all the patterns match a prefix (i.e. recursively)'''
     for kind, pat, source in kindpats:
         if kind not in (b'path', b'relpath'):
             return False
     return True
 _commentre = None


 def readpatternfile(filepath, warn, sourceinfo=False):
-    '''parse a pattern file, returning a list of
+    """parse a pattern file, returning a list of
     patterns. These patterns should be given to compile()
     to be validated and converted into a match function.

     trailing white space is dropped.
     the escape character is backslash.
     comments start with #.
     empty lines are skipped.

     lines can be of the following formats:

     syntax: regexp # defaults following lines to non-rooted regexps
     syntax: glob   # defaults following lines to non-rooted globs
     re:pattern     # non-rooted regular expression
     glob:pattern   # non-rooted glob
     rootglob:pat   # rooted glob (same root as ^ in regexps)
     pattern        # pattern of the current default type

     if sourceinfo is set, returns a list of tuples:
     (pattern, lineno, originalline).
     This is useful to debug ignore patterns.
-    '''
+    """
     syntaxes = {
         b're': b'relre:',
         b'regexp': b'relre:',
         b'glob': b'relglob:',
         b'rootglob': b'rootglob:',
         b'include': b'include',
         b'subinclude': b'subinclude',
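For reference, a plausible example of the kind of file readpatternfile consumes (an .hgignore, say), using only the directives and prefixes listed in the docstring above:

# ignore compiled bytecode (glob syntax from here on)
syntax: glob
*.pyc
build/

# switch the default to regexps
syntax: regexp
\.orig$

# per-line kind prefixes override the current default
rootglob:dist/*.whl
re:.*\.rej$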
 patches = mpatch.patches
 patchedsize = mpatch.patchedsize
 textdiff = bdiff.bdiff
 splitnewlines = bdiff.splitnewlines


 # TODO: this looks like it could be an attrs, which might help pytype
 class diffopts(object):
-    '''context is the number of context lines
+    """context is the number of context lines
     text treats all files as text
     showfunc enables diff -p output
     git enables the git extended patch format
     nodates removes dates from diff headers
     nobinary ignores binary files
     noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
     ignorews ignores all whitespace changes in the diff
     ignorewsamount ignores changes in the amount of whitespace
     ignoreblanklines ignores changes whose lines are all blank
     upgrade generates git diffs to avoid data loss
-    '''
+    """

     _HAS_DYNAMIC_ATTRIBUTES = True

     defaults = {
         b'context': 3,
         b'text': False,
         b'showfunc': False,
         b'git': False,
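diffopts is essentially an attribute bag seeded from the defaults dict. A minimal sketch of that pattern (str keys for brevity; the real class goes through pycompat for bytes keys and also validates unknown options):

class toydiffopts(object):
    """Minimal sketch of the defaults-dict pattern used by diffopts."""

    defaults = {'context': 3, 'text': False, 'showfunc': False, 'git': False}

    def __init__(self, **opts):
        # every default becomes an attribute, overridable via kwargs
        for k, v in self.defaults.items():
            setattr(self, k, opts.get(k, v))

opts = toydiffopts(context=5, git=True)
assert opts.context == 5 and opts.git and not opts.text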
                         b'remote differs from untracked local',
                     )
                 elif config == b'abort':
                     abortconflicts.add(f)
                 else:
                     if config == b'warn':
                         warnconflicts.add(f)

             mresult.addfile(
-                f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
+                f,
+                mergestatemod.ACTION_GET,
+                (fl2, True),
+                b'remote created',
             )

     for f in sorted(abortconflicts):
         warn = repo.ui.warn
         if f in pathconflicts:
             if repo.wvfs.isfileorlink(f):
                 warn(_(b"%s: untracked file conflicts with directory\n") % f)
             else:
     for f in wctx.deleted():
         if f not in mctx:
             mresult.addfile(f, m, None, b"forget deleted")

     if not branchmerge:
         for f in wctx.removed():
             if f not in mctx:
                 mresult.addfile(
-                    f, mergestatemod.ACTION_FORGET, None, b"forget removed",
+                    f,
+                    mergestatemod.ACTION_FORGET,
+                    None,
+                    b"forget removed",
                 )


 def _checkcollision(repo, wmf, mresult):
     """
     Check for case-folding collisions.
     """
     # If the repo is narrowed, filter out files outside the narrowspec.

[...]

                 )
             else:
                 raise error.Abort(
                     _(b'conflict in file \'%s\' is outside narrow clone') % f
                 )
 class mergeresult(object):
-    '''An object representing result of merging manifests.
+    """An object representing result of merging manifests.

     It has information about what actions need to be performed on dirstate
-    mapping of divergent renames and other such cases.'''
+    mapping of divergent renames and other such cases."""

     def __init__(self):
         """
         filemapping: dict of filename as keys and action related info as values
         diverge: mapping of source name -> list of dest name for
                  divergent renames
         renamedelete: mapping of source name -> list of destinations for files
                       deleted on one side and renamed on other.
         commitinfo: dict containing data which should be used on commit
                     contains a filename -> info mapping
         actionmapping: dict of action names as keys and values are dict of
                        filename as key and related data as values
         """
         self._filemapping = {}
         self._diverge = {}
         self._renamedelete = {}
         self._commitinfo = collections.defaultdict(dict)
         self._actionmapping = collections.defaultdict(dict)

     def updatevalues(self, diverge, renamedelete):
         self._diverge = diverge
         self._renamedelete = renamedelete

     def addfile(self, filename, action, data, message):
-        """ adds a new file to the mergeresult object
+        """adds a new file to the mergeresult object

         filename: file which we are adding
         action: one of mergestatemod.ACTION_*
         data: a tuple of information like fctx and ctx related to this merge
         message: a message about the merge
         """
         # if the file already existed, we need to delete its old
         # entry from _actionmapping too
         if filename in self._filemapping:
             a, d, m = self._filemapping[filename]
             del self._actionmapping[a][filename]

         self._filemapping[filename] = (action, data, message)
         self._actionmapping[action][filename] = (data, message)
     def getfile(self, filename, default_return=None):
-        """ returns (action, args, msg) about this file
-        returns default_return if the file is not present """
+        """returns (action, args, msg) about this file
+        returns default_return if the file is not present"""
         if filename in self._filemapping:
             return self._filemapping[filename]
         return default_return

     def files(self, actions=None):
-        """ returns files on which the provided action needs to be performed
+        """returns files on which the provided action needs to be performed

         If actions is None, all files are returned
         """
         # TODO: think whether we should return renamedelete and
         # diverge filenames also
         if actions is None:
             for f in self._filemapping:
                 yield f
         else:
             for a in actions:
                 for f in self._actionmapping[a]:
                     yield f

     def removefile(self, filename):
-        """ removes a file from the mergeresult object as the file might
-        not be merging anymore """
+        """removes a file from the mergeresult object as the file might
+        not be merging anymore"""
         action, data, message = self._filemapping[filename]
         del self._filemapping[filename]
         del self._actionmapping[action][filename]

     def getactions(self, actions, sort=False):
-        """ get list of files which are marked with these actions
+        """get list of files which are marked with these actions
         if sort is true, files for each action are sorted and then added

         Returns a list of tuple of form (filename, data, message)
         """
         for a in actions:
             if sort:
                 for f in sorted(self._actionmapping[a]):
                     args, msg = self._actionmapping[a][f]
                     yield f, args, msg
             else:
                 for f, (args, msg) in pycompat.iteritems(
                     self._actionmapping[a]
                 ):
                     yield f, args, msg

     def len(self, actions=None):
-        """ returns number of files which need actions
+        """returns number of files which need actions

         if actions is passed, only the total number of files in those
-        actions is returned """
+        actions is returned"""
         if actions is None:
             return len(self._filemapping)

         return sum(len(self._actionmapping[a]) for a in actions)

     def filemap(self, sort=False):
         if sort:
             for key, val in sorted(pycompat.iteritems(self._filemapping)):
                 yield key, val
         else:
             for key, val in pycompat.iteritems(self._filemapping):
                 yield key, val
     def addcommitinfo(self, filename, key, value):
-        """ adds key-value information about filename which will be required
-        while committing this merge """
+        """adds key-value information about filename which will be required
+        while committing this merge"""
         self._commitinfo[filename][key] = value

     @property
     def diverge(self):
         return self._diverge

     @property
     def renamedelete(self):
         return self._renamedelete

     @property
     def commitinfo(self):
         return self._commitinfo

     @property
     def actionsdict(self):
-        """ returns a dictionary of actions to be performed with action as key
-        and a list of files and related arguments as values """
+        """returns a dictionary of actions to be performed with action as key
+        and a list of files and related arguments as values"""
         res = collections.defaultdict(list)
         for a, d in pycompat.iteritems(self._actionmapping):
             for f, (args, msg) in pycompat.iteritems(d):
                 res[a].append((f, args, msg))
         return res

     def setactions(self, actions):
         self._filemapping = actions
         self._actionmapping = collections.defaultdict(dict)
         for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
             self._actionmapping[act][f] = data, msg
def hasconflicts(self): | def hasconflicts(self): | ||||
""" tells whether this merge resulted in some actions which can | """tells whether this merge resulted in some actions which can | ||||
result in conflicts or not """ | result in conflicts or not""" | ||||
for a in self._actionmapping.keys(): | for a in self._actionmapping.keys(): | ||||
if ( | if ( | ||||
a | a | ||||
not in ( | not in ( | ||||
mergestatemod.ACTION_GET, | mergestatemod.ACTION_GET, | ||||
mergestatemod.ACTION_EXEC, | mergestatemod.ACTION_EXEC, | ||||
mergestatemod.ACTION_REMOVE, | mergestatemod.ACTION_REMOVE, | ||||
mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, | mergestatemod.ACTION_PATH_CONFLICT_RESOLVE, | ||||
b'other replaced from %s' % fa, | b'other replaced from %s' % fa, | ||||
) | ) | ||||
else: | else: | ||||
a = ma[f] | a = ma[f] | ||||
fla = ma.flags(f) | fla = ma.flags(f) | ||||
nol = b'l' not in fl1 + fl2 + fla | nol = b'l' not in fl1 + fl2 + fla | ||||
if n2 == a and fl2 == fla: | if n2 == a and fl2 == fla: | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_KEEP, (), b'remote unchanged', | f, | ||||
mergestatemod.ACTION_KEEP, | |||||
(), | |||||
b'remote unchanged', | |||||
) | ) | ||||
elif n1 == a and fl1 == fla: # local unchanged - use remote | elif n1 == a and fl1 == fla: # local unchanged - use remote | ||||
if n1 == n2: # optimization: keep local content | if n1 == n2: # optimization: keep local content | ||||
mresult.addfile( | mresult.addfile( | ||||
f, | f, | ||||
mergestatemod.ACTION_EXEC, | mergestatemod.ACTION_EXEC, | ||||
(fl2,), | (fl2,), | ||||
b'update permissions', | b'update permissions', | ||||
if branchmerge: | if branchmerge: | ||||
mresult.addcommitinfo( | mresult.addcommitinfo( | ||||
f, b'merge-removal-candidate', b'yes' | f, b'merge-removal-candidate', b'yes' | ||||
) | ) | ||||
elif n1 == addednodeid: | elif n1 == addednodeid: | ||||
# This file was locally added. We should forget it instead of | # This file was locally added. We should forget it instead of | ||||
# deleting it. | # deleting it. | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_FORGET, None, b'remote deleted', | f, | ||||
mergestatemod.ACTION_FORGET, | |||||
None, | |||||
b'remote deleted', | |||||
) | ) | ||||
else: | else: | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_REMOVE, None, b'other deleted', | f, | ||||
mergestatemod.ACTION_REMOVE, | |||||
None, | |||||
b'other deleted', | |||||
) | ) | ||||
if branchmerge: | if branchmerge: | ||||
# the file must be absent after merging, | # the file must be absent after merging, | ||||
# however the user might make | # however the user might make | ||||
# the file reappear using revert and if they do, | # the file reappear using revert and if they do, | ||||
# we force create a new node | # we force create a new node | ||||
mresult.addcommitinfo( | mresult.addcommitinfo( | ||||
f, b'merge-removal-candidate', b'yes' | f, b'merge-removal-candidate', b'yes' | ||||
renamedelete.update(branch_copies2.renamedelete) | renamedelete.update(branch_copies2.renamedelete) | ||||
mresult.updatevalues(diverge, renamedelete) | mresult.updatevalues(diverge, renamedelete) | ||||
return mresult | return mresult | ||||
def _resolvetrivial(repo, wctx, mctx, ancestor, mresult): | def _resolvetrivial(repo, wctx, mctx, ancestor, mresult): | ||||
"""Resolves false conflicts where the nodeid changed but the content | """Resolves false conflicts where the nodeid changed but the content | ||||
remained the same.""" | remained the same.""" | ||||
# We force a copy of actions.items() because we're going to mutate | # We force a copy of actions.items() because we're going to mutate | ||||
# actions as we resolve trivial conflicts. | # actions as we resolve trivial conflicts. | ||||
for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))): | for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))): | ||||
if f in ancestor and not wctx[f].cmp(ancestor[f]): | if f in ancestor and not wctx[f].cmp(ancestor[f]): | ||||
# local did change but ended up with same content | # local did change but ended up with same content | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_REMOVE, None, b'prompt same' | f, mergestatemod.ACTION_REMOVE, None, b'prompt same' | ||||
) | ) | ||||
mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | mergestatemod.ACTION_LOCAL_DIR_RENAME_GET, | ||||
mergestatemod.ACTION_MERGE, | mergestatemod.ACTION_MERGE, | ||||
] | ] | ||||
) | ) | ||||
prefetch = scmutil.prefetchfiles | prefetch = scmutil.prefetchfiles | ||||
matchfiles = scmutil.matchfiles | matchfiles = scmutil.matchfiles | ||||
prefetch( | prefetch( | ||||
repo, [(ctx.rev(), matchfiles(repo, files),)], | repo, | ||||
[ | |||||
( | |||||
ctx.rev(), | |||||
matchfiles(repo, files), | |||||
) | |||||
], | |||||
) | ) | ||||
@attr.s(frozen=True) | @attr.s(frozen=True) | ||||
class updateresult(object): | class updateresult(object): | ||||
updatedcount = attr.ib() | updatedcount = attr.ib() | ||||
mergedcount = attr.ib() | mergedcount = attr.ib() | ||||
removedcount = attr.ib() | removedcount = attr.ib() | ||||
unresolvedcount = attr.ib() | unresolvedcount = attr.ib() | ||||
def isempty(self): | def isempty(self): | ||||
return not ( | return not ( | ||||
self.updatedcount | self.updatedcount | ||||
or self.mergedcount | or self.mergedcount | ||||
or self.removedcount | or self.removedcount | ||||
or self.unresolvedcount | or self.unresolvedcount | ||||
) | ) | ||||
def applyupdates( | def applyupdates( | ||||
repo, mresult, wctx, mctx, overwrite, wantfiledata, labels=None, | repo, | ||||
mresult, | |||||
wctx, | |||||
mctx, | |||||
overwrite, | |||||
wantfiledata, | |||||
labels=None, | |||||
): | ): | ||||
"""apply the merge action list to the working directory | """apply the merge action list to the working directory | ||||
mresult is a mergeresult object representing result of the merge | mresult is a mergeresult object representing result of the merge | ||||
wctx is the working copy context | wctx is the working copy context | ||||
mctx is the context to be merged into the working copy | mctx is the context to be merged into the working copy | ||||
Return a tuple of (counts, filedata), where counts is a tuple | Return a tuple of (counts, filedata), where counts is a tuple | ||||
b'fsmonitor', b'warn_update_file_count' | b'fsmonitor', b'warn_update_file_count' | ||||
) | ) | ||||
# avoid cycle dirstate -> sparse -> merge -> dirstate | # avoid cycle dirstate -> sparse -> merge -> dirstate | ||||
from . import dirstate | from . import dirstate | ||||
if dirstate.rustmod is not None: | if dirstate.rustmod is not None: | ||||
# When using rust status, fsmonitor becomes necessary at higher sizes | # When using rust status, fsmonitor becomes necessary at higher sizes | ||||
fsmonitorthreshold = repo.ui.configint( | fsmonitorthreshold = repo.ui.configint( | ||||
b'fsmonitor', b'warn_update_file_count_rust', | b'fsmonitor', | ||||
b'warn_update_file_count_rust', | |||||
) | ) | ||||
try: | try: | ||||
# avoid cycle: extensions -> cmdutil -> merge | # avoid cycle: extensions -> cmdutil -> merge | ||||
from . import extensions | from . import extensions | ||||
extensions.find(b'fsmonitor') | extensions.find(b'fsmonitor') | ||||
fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off' | fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off' | ||||
b"local%(l)s changed %(f)s which other%(o)s deleted\n" | b"local%(l)s changed %(f)s which other%(o)s deleted\n" | ||||
b"use (c)hanged version or (d)elete?" | b"use (c)hanged version or (d)elete?" | ||||
b"$$ &Changed $$ &Delete" | b"$$ &Changed $$ &Delete" | ||||
) | ) | ||||
% prompts, | % prompts, | ||||
0, | 0, | ||||
): | ): | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_REMOVE, None, b'prompt delete', | f, | ||||
mergestatemod.ACTION_REMOVE, | |||||
None, | |||||
b'prompt delete', | |||||
) | ) | ||||
elif f in p1: | elif f in p1: | ||||
mresult.addfile( | mresult.addfile( | ||||
f, | f, | ||||
mergestatemod.ACTION_ADD_MODIFIED, | mergestatemod.ACTION_ADD_MODIFIED, | ||||
None, | None, | ||||
b'prompt keep', | b'prompt keep', | ||||
) | ) | ||||
else: | else: | ||||
mresult.addfile( | mresult.addfile( | ||||
f, mergestatemod.ACTION_ADD, None, b'prompt keep', | f, | ||||
mergestatemod.ACTION_ADD, | |||||
None, | |||||
b'prompt keep', | |||||
) | ) | ||||
elif m == mergestatemod.ACTION_DELETED_CHANGED: | elif m == mergestatemod.ACTION_DELETED_CHANGED: | ||||
f1, f2, fa, move, anc = args | f1, f2, fa, move, anc = args | ||||
flags = p2[f2].flags() | flags = p2[f2].flags() | ||||
if ( | if ( | ||||
repo.ui.promptchoice( | repo.ui.promptchoice( | ||||
_( | _( | ||||
b"other%(o)s changed %(f)s which local%(l)s deleted\n" | b"other%(o)s changed %(f)s which local%(l)s deleted\n" | ||||
repo.vfs.write(b'updatestate', p2.hex()) | repo.vfs.write(b'updatestate', p2.hex()) | ||||
_advertisefsmonitor( | _advertisefsmonitor( | ||||
repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node() | repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node() | ||||
) | ) | ||||
wantfiledata = updatedirstate and not branchmerge | wantfiledata = updatedirstate and not branchmerge | ||||
stats, getfiledata = applyupdates( | stats, getfiledata = applyupdates( | ||||
repo, mresult, wc, p2, overwrite, wantfiledata, labels=labels, | repo, | ||||
mresult, | |||||
wc, | |||||
p2, | |||||
overwrite, | |||||
wantfiledata, | |||||
labels=labels, | |||||
) | ) | ||||
if updatedirstate: | if updatedirstate: | ||||
with repo.dirstate.parentchange(): | with repo.dirstate.parentchange(): | ||||
repo.setparents(fp1, fp2) | repo.setparents(fp1, fp2) | ||||
mergestatemod.recordupdates( | mergestatemod.recordupdates( | ||||
repo, mresult.actionsdict, branchmerge, getfiledata | repo, mresult.actionsdict, branchmerge, getfiledata | ||||
) | ) |
NO_OP_ACTIONS = ( | NO_OP_ACTIONS = ( | ||||
ACTION_KEEP, | ACTION_KEEP, | ||||
ACTION_KEEP_ABSENT, | ACTION_KEEP_ABSENT, | ||||
ACTION_KEEP_NEW, | ACTION_KEEP_NEW, | ||||
) | ) | ||||
class _mergestate_base(object): | class _mergestate_base(object): | ||||
'''track 3-way merge state of individual files | """track 3-way merge state of individual files | ||||
The merge state is stored on disk when needed. Two files are used: one with | The merge state is stored on disk when needed. Two files are used: one with | ||||
an old format (version 1), and one with a new format (version 2). Version 2 | an old format (version 1), and one with a new format (version 2). Version 2 | ||||
stores a superset of the data in version 1, including new kinds of records | stores a superset of the data in version 1, including new kinds of records | ||||
in the future. For more about the new format, see the documentation for | in the future. For more about the new format, see the documentation for | ||||
`_readrecordsv2`. | `_readrecordsv2`. | ||||
Each record can contain arbitrary content, and has an associated type. This | Each record can contain arbitrary content, and has an associated type. This | ||||
u: unresolved conflict | u: unresolved conflict | ||||
r: resolved conflict | r: resolved conflict | ||||
pu: unresolved path conflict (file conflicts with directory) | pu: unresolved path conflict (file conflicts with directory) | ||||
pr: resolved path conflict | pr: resolved path conflict | ||||
o: file was merged in favor of other parent of merge (DEPRECATED) | o: file was merged in favor of other parent of merge (DEPRECATED) | ||||
The resolve command transitions between 'u' and 'r' for conflicts and | The resolve command transitions between 'u' and 'r' for conflicts and | ||||
'pu' and 'pr' for path conflicts. | 'pu' and 'pr' for path conflicts. | ||||
''' | """ | ||||
def __init__(self, repo): | def __init__(self, repo): | ||||
"""Initialize the merge state. | """Initialize the merge state. | ||||
Do not use this directly! Instead call read() or clean().""" | Do not use this directly! Instead call read() or clean().""" | ||||
self._repo = repo | self._repo = repo | ||||
self._state = {} | self._state = {} | ||||
self._stateextras = collections.defaultdict(dict) | self._stateextras = collections.defaultdict(dict) | ||||
path: the path that conflicts | path: the path that conflicts | ||||
frename: the filename the conflicting file was renamed to | frename: the filename the conflicting file was renamed to | ||||
forigin: origin of the file ('l' or 'r' for local/remote) | forigin: origin of the file ('l' or 'r' for local/remote) | ||||
""" | """ | ||||
self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin] | self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin] | ||||
self._dirty = True | self._dirty = True | ||||
def addcommitinfo(self, path, data): | def addcommitinfo(self, path, data): | ||||
""" stores information which is required at commit | """stores information which is required at commit | ||||
into _stateextras """ | into _stateextras""" | ||||
self._stateextras[path].update(data) | self._stateextras[path].update(data) | ||||
self._dirty = True | self._dirty = True | ||||
def __contains__(self, dfile): | def __contains__(self, dfile): | ||||
return dfile in self._state | return dfile in self._state | ||||
def __getitem__(self, dfile): | def __getitem__(self, dfile): | ||||
return self._state[dfile][0] | return self._state[dfile][0] |
elif p1.rev() == p2.rev(): | elif p1.rev() == p2.rev(): | ||||
# In the wild, one can encounter such "non-merge" | # In the wild, one can encounter such "non-merge" | ||||
return _process_linear(p1, ctx) | return _process_linear(p1, ctx) | ||||
else: | else: | ||||
return _process_merge(p1, p2, ctx) | return _process_merge(p1, p2, ctx) | ||||
def _process_root(ctx): | def _process_root(ctx): | ||||
"""compute the appropriate changed files for a changeset with no parents | """compute the appropriate changed files for a changeset with no parents""" | ||||
""" | |||||
# Simple, there was nothing before it, so everything is added. | # Simple, there was nothing before it, so everything is added. | ||||
md = ChangingFiles() | md = ChangingFiles() | ||||
manifest = ctx.manifest() | manifest = ctx.manifest() | ||||
for filename in manifest: | for filename in manifest: | ||||
md.mark_added(filename) | md.mark_added(filename) | ||||
return md | return md | ||||
def _process_linear(parent_ctx, children_ctx, parent=1): | def _process_linear(parent_ctx, children_ctx, parent=1): | ||||
"""compute the appropriate changed files for a changeset with a single parent | """compute the appropriate changed files for a changeset with a single parent""" | ||||
""" | |||||
md = ChangingFiles() | md = ChangingFiles() | ||||
parent_manifest = parent_ctx.manifest() | parent_manifest = parent_ctx.manifest() | ||||
children_manifest = children_ctx.manifest() | children_manifest = children_ctx.manifest() | ||||
copies_candidate = [] | copies_candidate = [] | ||||
for filename, d in parent_manifest.diff(children_manifest).items(): | for filename, d in parent_manifest.diff(children_manifest).items(): | ||||
if d[1][0] is None: | if d[1][0] is None: | ||||
assert False, "unreachable" | assert False, "unreachable" | ||||
def _missing_from_all_ancestors(mas, filename): | def _missing_from_all_ancestors(mas, filename): | ||||
return all(_find(ma, filename) is None for ma in mas) | return all(_find(ma, filename) is None for ma in mas) | ||||
def computechangesetfilesadded(ctx): | def computechangesetfilesadded(ctx): | ||||
"""return the list of files added in a changeset | """return the list of files added in a changeset""" | ||||
""" | |||||
added = [] | added = [] | ||||
for f in ctx.files(): | for f in ctx.files(): | ||||
if not any(f in p for p in ctx.parents()): | if not any(f in p for p in ctx.parents()): | ||||
added.append(f) | added.append(f) | ||||
return added | return added | ||||
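The membership test above relies on "f in ctx" meaning "f is in that changeset's manifest". A toy illustration with plain sets standing in for parent contexts (all names fabricated):

    ctx_files = [b'new.txt', b'kept.txt']      # files touched by the changeset
    parent_manifests = [{b'kept.txt'}, set()]  # stand-ins for ctx.parents()

    added = [f for f in ctx_files if not any(f in p for p in parent_manifests)]
    print(added)  # [b'new.txt'] -- present in no parent, hence "added"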
def get_removal_filter(ctx, x=None): | def get_removal_filter(ctx, x=None): | ||||
return all(f in ma and ma.find(f) == m2.find(f) for ma in mas()) | return all(f in ma and ma.find(f) == m2.find(f) for ma in mas()) | ||||
else: | else: | ||||
return True | return True | ||||
return deletionfromparent | return deletionfromparent | ||||
def computechangesetfilesremoved(ctx): | def computechangesetfilesremoved(ctx): | ||||
"""return the list of files removed in a changeset | """return the list of files removed in a changeset""" | ||||
""" | |||||
removed = [] | removed = [] | ||||
for f in ctx.files(): | for f in ctx.files(): | ||||
if f not in ctx: | if f not in ctx: | ||||
removed.append(f) | removed.append(f) | ||||
if removed: | if removed: | ||||
rf = get_removal_filter(ctx) | rf = get_removal_filter(ctx) | ||||
removed = [r for r in removed if not rf(r)] | removed = [r for r in removed if not rf(r)] | ||||
return removed | return removed | ||||
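Same two-phase shape as above: collect candidates cheaply, then pay for the removal filter only when the candidate list is non-empty. A sketch with a fabricated filter:

    ctx_files = [b'gone.txt', b'still-here.txt']
    ctx_manifest = {b'still-here.txt'}             # stand-in for "f in ctx"

    removed = [f for f in ctx_files if f not in ctx_manifest]
    if removed:                                    # build the filter lazily
        rf = lambda f: f == b'policy-ignored'      # fabricated removal filter
        removed = [r for r in removed if not rf(r)]
    print(removed)  # [b'gone.txt']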
def computechangesetfilesmerged(ctx): | def computechangesetfilesmerged(ctx): | ||||
"""return the list of files merged in a changeset | """return the list of files merged in a changeset""" | ||||
""" | |||||
merged = [] | merged = [] | ||||
if len(ctx.parents()) < 2: | if len(ctx.parents()) < 2: | ||||
return merged | return merged | ||||
for f in ctx.files(): | for f in ctx.files(): | ||||
if f in ctx: | if f in ctx: | ||||
fctx = ctx[f] | fctx = ctx[f] | ||||
parents = fctx._filelog.parents(fctx._filenode) | parents = fctx._filelog.parents(fctx._filenode) | ||||
if parents[1] != node.nullid: | if parents[1] != node.nullid: |
return b"%s\n%s\n\n" % (s, b"." * encoding.colwidth(s)) | return b"%s\n%s\n\n" % (s, b"." * encoding.colwidth(s)) | ||||
def subsubsubsubsection(s): | def subsubsubsubsection(s): | ||||
return b"%s\n%s\n\n" % (s, b"'" * encoding.colwidth(s)) | return b"%s\n%s\n\n" % (s, b"'" * encoding.colwidth(s)) | ||||
def replace(text, substs): | def replace(text, substs): | ||||
''' | """ | ||||
Apply a list of (find, replace) pairs to a text. | Apply a list of (find, replace) pairs to a text. | ||||
>>> replace(b"foo bar", [(b'f', b'F'), (b'b', b'B')]) | >>> replace(b"foo bar", [(b'f', b'F'), (b'b', b'B')]) | ||||
'Foo Bar' | 'Foo Bar' | ||||
>>> encoding.encoding = b'latin1' | >>> encoding.encoding = b'latin1' | ||||
>>> replace(b'\\x81\\\\', [(b'\\\\', b'/')]) | >>> replace(b'\\x81\\\\', [(b'\\\\', b'/')]) | ||||
'\\x81/' | '\\x81/' | ||||
>>> encoding.encoding = b'shiftjis' | >>> encoding.encoding = b'shiftjis' | ||||
>>> replace(b'\\x81\\\\', [(b'\\\\', b'/')]) | >>> replace(b'\\x81\\\\', [(b'\\\\', b'/')]) | ||||
'\\x81\\\\' | '\\x81\\\\' | ||||
''' | """ | ||||
# some character encodings (cp932 for Japanese, at least) use | # some character encodings (cp932 for Japanese, at least) use | ||||
# ASCII characters other than control/alphabet/digit as a part of | # ASCII characters other than control/alphabet/digit as a part of | ||||
# multi-bytes characters, so direct replacing with such characters | # multi-bytes characters, so direct replacing with such characters | ||||
# on strings in local encoding causes invalid byte sequences. | # on strings in local encoding causes invalid byte sequences. | ||||
utext = text.decode(pycompat.sysstr(encoding.encoding)) | utext = text.decode(pycompat.sysstr(encoding.encoding)) | ||||
for f, t in substs: | for f, t in substs: | ||||
utext = utext.replace(f.decode("ascii"), t.decode("ascii")) | utext = utext.replace(f.decode("ascii"), t.decode("ascii")) | ||||
i += 1 | i += 1 | ||||
return blocks, pruned | return blocks, pruned | ||||
_sectionre = re.compile(br"""^([-=`:.'"~^_*+#])\1+$""") | _sectionre = re.compile(br"""^([-=`:.'"~^_*+#])\1+$""") | ||||
def findtables(blocks): | def findtables(blocks): | ||||
'''Find simple tables | """Find simple tables | ||||
Only simple one-line table elements are supported | Only simple one-line table elements are supported | ||||
''' | """ | ||||
for block in blocks: | for block in blocks: | ||||
# Searching for a block that looks like this: | # Searching for a block that looks like this: | ||||
# | # | ||||
# === ==== === | # === ==== === | ||||
# A B C | # A B C | ||||
# === ==== === <- optional | # === ==== === <- optional | ||||
# 1 2 3 | # 1 2 3 | ||||
This groups bullets, options, and definitions together with no vertical | This groups bullets, options, and definitions together with no vertical | ||||
space between them, and adds an empty block between all other blocks. | space between them, and adds an empty block between all other blocks. | ||||
""" | """ | ||||
i = 1 | i = 1 | ||||
while i < len(blocks): | while i < len(blocks): | ||||
if blocks[i][b'type'] == blocks[i - 1][b'type'] and blocks[i][ | if blocks[i][b'type'] == blocks[i - 1][b'type'] and blocks[i][ | ||||
b'type' | b'type' | ||||
] in (b'bullet', b'option', b'field',): | ] in ( | ||||
b'bullet', | |||||
b'option', | |||||
b'field', | |||||
): | |||||
i += 1 | i += 1 | ||||
elif not blocks[i - 1][b'lines']: | elif not blocks[i - 1][b'lines']: | ||||
# no lines in previous block, do not separate | # no lines in previous block, do not separate | ||||
i += 1 | i += 1 | ||||
else: | else: | ||||
blocks.insert( | blocks.insert( | ||||
i, {b'lines': [b''], b'indent': 0, b'type': b'margin'} | i, {b'lines': [b''], b'indent': 0, b'type': b'margin'} | ||||
) | ) |
def clearwcbackup(repo, backupname): | def clearwcbackup(repo, backupname): | ||||
if requirements.NARROW_REQUIREMENT not in repo.requirements: | if requirements.NARROW_REQUIREMENT not in repo.requirements: | ||||
return | return | ||||
repo.vfs.tryunlink(backupname) | repo.vfs.tryunlink(backupname) | ||||
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes): | def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes): | ||||
r""" Restricts the patterns according to repo settings, | r"""Restricts the patterns according to repo settings, | ||||
results in a logical AND operation | results in a logical AND operation | ||||
:param req_includes: requested includes | :param req_includes: requested includes | ||||
:param req_excludes: requested excludes | :param req_excludes: requested excludes | ||||
:param repo_includes: repo includes | :param repo_includes: repo includes | ||||
:param repo_excludes: repo excludes | :param repo_excludes: repo excludes | ||||
:return: include patterns, exclude patterns, and invalid include patterns. | :return: include patterns, exclude patterns, and invalid include patterns. | ||||
""" | """ |
# we have a public predecessor | # we have a public predecessor | ||||
bumped.add(rev) | bumped.add(rev) | ||||
break # Next draft! | break # Next draft! | ||||
return bumped | return bumped | ||||
@cachefor(b'contentdivergent') | @cachefor(b'contentdivergent') | ||||
def _computecontentdivergentset(repo): | def _computecontentdivergentset(repo): | ||||
"""the set of rev that compete to be the final successors of some revision. | """the set of rev that compete to be the final successors of some revision.""" | ||||
""" | |||||
divergent = set() | divergent = set() | ||||
obsstore = repo.obsstore | obsstore = repo.obsstore | ||||
newermap = {} | newermap = {} | ||||
tonode = repo.changelog.node | tonode = repo.changelog.node | ||||
for rev in repo.revs(b'(not public()) - obsolete()'): | for rev in repo.revs(b'(not public()) - obsolete()'): | ||||
node = tonode(rev) | node = tonode(rev) | ||||
mark = obsstore.predecessors.get(node, ()) | mark = obsstore.predecessors.get(node, ()) | ||||
toprocess = set(mark) | toprocess = set(mark) |
re.compile(b'^branch$'), | re.compile(b'^branch$'), | ||||
re.compile(b'^.*-source$'), | re.compile(b'^.*-source$'), | ||||
re.compile(b'^.*_source$'), | re.compile(b'^.*_source$'), | ||||
re.compile(b'^source$'), | re.compile(b'^source$'), | ||||
] | ] | ||||
def metanotblacklisted(metaitem): | def metanotblacklisted(metaitem): | ||||
""" Check that the key of a meta item (extrakey, extravalue) does not | """Check that the key of a meta item (extrakey, extravalue) does not | ||||
match at least one of the blacklist pattern | match at least one of the blacklist pattern | ||||
""" | """ | ||||
metakey = metaitem[0] | metakey = metaitem[0] | ||||
return not any(pattern.match(metakey) for pattern in METABLACKLIST) | return not any(pattern.match(metakey) for pattern in METABLACKLIST) | ||||
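A quick usage check of the predicate above, with a trimmed METABLACKLIST copied from the compiled patterns shown earlier in this hunk:

    import re

    METABLACKLIST = [re.compile(b'^branch$'), re.compile(b'^.*-source$')]

    def metanotblacklisted(metaitem):
        metakey = metaitem[0]
        return not any(pattern.match(metakey) for pattern in METABLACKLIST)

    print(metanotblacklisted((b'branch', b'default')))     # False -- blacklisted
    print(metanotblacklisted((b'rebase-source', b'abc')))  # False -- blacklisted
    print(metanotblacklisted((b'user', b'alice')))         # True  -- kept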
def _prepare_hunk(hunk): | def _prepare_hunk(hunk): | ||||
right = _getdifflines(rightdiff) | right = _getdifflines(rightdiff) | ||||
if left != right: | if left != right: | ||||
return False | return False | ||||
return True | return True | ||||
def geteffectflag(source, successors): | def geteffectflag(source, successors): | ||||
""" From an obs-marker relation, compute what changed between the | """From an obs-marker relation, compute what changed between the | ||||
predecessor and the successor. | predecessor and the successor. | ||||
""" | """ | ||||
effects = 0 | effects = 0 | ||||
for changectx in successors: | for changectx in successors: | ||||
# Check if description has changed | # Check if description has changed | ||||
if changectx.description() != source.description(): | if changectx.description() != source.description(): | ||||
effects |= DESCCHANGED | effects |= DESCCHANGED | ||||
values = [] | values = [] | ||||
for sset in fullsuccessorsets: | for sset in fullsuccessorsets: | ||||
values.append({b'successors': sset, b'markers': sset.markers}) | values.append({b'successors': sset, b'markers': sset.markers}) | ||||
return values | return values | ||||
def _getobsfate(successorssets): | def _getobsfate(successorssets): | ||||
""" Compute a changeset obsolescence fate based on its successorssets. | """Compute a changeset obsolescence fate based on its successorssets. | ||||
Successors can be the tipmost ones or the immediate ones. This function's | Successors can be the tipmost ones or the immediate ones. This function's | ||||
return values are not meant to be shown directly to users; they are meant | return values are not meant to be shown directly to users; they are meant | ||||
to be used by internal functions only. | to be used by internal functions only. | ||||
Returns one fate from the following values: | Returns one fate from the following values: | ||||
- pruned | - pruned | ||||
- diverged | - diverged | ||||
- superseded | - superseded | ||||
- superseded_split | - superseded_split | ||||
if len(successors) == 1: | if len(successors) == 1: | ||||
return b'superseded' | return b'superseded' | ||||
else: | else: | ||||
return b'superseded_split' | return b'superseded_split' | ||||
def obsfateverb(successorset, markers): | def obsfateverb(successorset, markers): | ||||
""" Return the verb summarizing the successorset and potentially using | """Return the verb summarizing the successorset and potentially using | ||||
information from the markers | information from the markers | ||||
""" | """ | ||||
if not successorset: | if not successorset: | ||||
verb = b'pruned' | verb = b'pruned' | ||||
elif len(successorset) == 1: | elif len(successorset) == 1: | ||||
verb = b'rewritten' | verb = b'rewritten' | ||||
else: | else: | ||||
verb = b'split' | verb = b'split' | ||||
return verb | return verb | ||||
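The verb depends only on the successorset's size; expected outputs for the three shapes (markers is accepted but unused by this helper):

    def obsfateverb(successorset, markers):
        if not successorset:
            return b'pruned'
        elif len(successorset) == 1:
            return b'rewritten'
        return b'split'

    print(obsfateverb([], None))                # b'pruned'
    print(obsfateverb([b'abc'], None))          # b'rewritten'
    print(obsfateverb([b'abc', b'def'], None))  # b'split'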
def markersdates(markers): | def markersdates(markers): | ||||
"""returns the list of dates for a list of markers | """returns the list of dates for a list of markers""" | ||||
""" | |||||
return [m[4] for m in markers] | return [m[4] for m in markers] | ||||
def markersusers(markers): | def markersusers(markers): | ||||
""" Returns a sorted list of markers users without duplicates | """Returns a sorted list of markers users without duplicates""" | ||||
""" | |||||
markersmeta = [dict(m[3]) for m in markers] | markersmeta = [dict(m[3]) for m in markers] | ||||
users = { | users = { | ||||
encoding.tolocal(meta[b'user']) | encoding.tolocal(meta[b'user']) | ||||
for meta in markersmeta | for meta in markersmeta | ||||
if meta.get(b'user') | if meta.get(b'user') | ||||
} | } | ||||
return sorted(users) | return sorted(users) | ||||
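Sketch of the extraction above: each marker's fourth field (m[3]) is a tuple of (key, value) metadata pairs, and m[4] is its date, matching markersdates() earlier. The encoding.tolocal() call is dropped here and the marker values are fabricated:

    markers = [
        (b'p1', (b's1',), 0, ((b'user', b'alice'), (b'operation', b'amend')), (0.0, 0)),
        (b'p2', (b's2',), 0, ((b'user', b'bob'),), (0.0, 0)),
        (b'p3', (b's3',), 0, ((b'user', b'alice'),), (0.0, 0)),
    ]

    markersmeta = [dict(m[3]) for m in markers]
    users = {meta[b'user'] for meta in markersmeta if meta.get(b'user')}
    print(sorted(users))  # [b'alice', b'bob'] -- deduplicated and sorted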
def markersoperations(markers): | def markersoperations(markers): | ||||
""" Returns a sorted list of markers operations without duplicates | """Returns a sorted list of markers operations without duplicates""" | ||||
""" | |||||
markersmeta = [dict(m[3]) for m in markers] | markersmeta = [dict(m[3]) for m in markers] | ||||
operations = { | operations = { | ||||
meta.get(b'operation') for meta in markersmeta if meta.get(b'operation') | meta.get(b'operation') for meta in markersmeta if meta.get(b'operation') | ||||
} | } | ||||
return sorted(operations) | return sorted(operations) | ||||
def obsfateprinter(ui, repo, successors, markers, formatctx): | def obsfateprinter(ui, repo, successors, markers, formatctx): | ||||
""" Build a obsfate string for a single successorset using all obsfate | """Build a obsfate string for a single successorset using all obsfate | ||||
related function defined in obsutil | related function defined in obsutil | ||||
""" | """ | ||||
quiet = ui.quiet | quiet = ui.quiet | ||||
verbose = ui.verbose | verbose = ui.verbose | ||||
normal = not verbose and not quiet | normal = not verbose and not quiet | ||||
line = [] | line = [] | ||||
b"superseded_split": _(b"hidden revision '%s' was split as: %s"), | b"superseded_split": _(b"hidden revision '%s' was split as: %s"), | ||||
b"superseded_split_several": _( | b"superseded_split_several": _( | ||||
b"hidden revision '%s' was split as: %s and %d more" | b"hidden revision '%s' was split as: %s and %d more" | ||||
), | ), | ||||
} | } | ||||
def _getfilteredreason(repo, changeid, ctx): | def _getfilteredreason(repo, changeid, ctx): | ||||
"""return a human-friendly string on why a obsolete changeset is hidden | """return a human-friendly string on why a obsolete changeset is hidden""" | ||||
""" | |||||
successors = successorssets(repo, ctx.node()) | successors = successorssets(repo, ctx.node()) | ||||
fate = _getobsfate(successors) | fate = _getobsfate(successors) | ||||
# Be more precise in case the revision is superseded | # Be more precise in case the revision is superseded | ||||
if fate == b'pruned': | if fate == b'pruned': | ||||
return filteredmsgtable[b'pruned'] % changeid | return filteredmsgtable[b'pruned'] % changeid | ||||
elif fate == b'diverged': | elif fate == b'diverged': | ||||
return filteredmsgtable[b'diverged'] % changeid | return filteredmsgtable[b'diverged'] % changeid |
if placeholder is not None and not isinstance(placeholder, tuple): | if placeholder is not None and not isinstance(placeholder, tuple): | ||||
raise error.ProgrammingError(b'placeholder must be a node tuple') | raise error.ProgrammingError(b'placeholder must be a node tuple') | ||||
matches = [tree] | matches = [tree] | ||||
if _matchtree(pattern, tree, placeholder, incompletenodes, matches): | if _matchtree(pattern, tree, placeholder, incompletenodes, matches): | ||||
return matches | return matches | ||||
def parseerrordetail(inst): | def parseerrordetail(inst): | ||||
"""Compose error message from specified ParseError object | """Compose error message from specified ParseError object""" | ||||
""" | |||||
if inst.location is not None: | if inst.location is not None: | ||||
return _(b'at %d: %s') % (inst.location, inst.message) | return _(b'at %d: %s') % (inst.location, inst.message) | ||||
else: | else: | ||||
return inst.message | return inst.message | ||||
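Illustration of the two ParseError shapes handled above, using a stand-in exception class and plain %-formatting in place of the _() gettext wrapper:

    class ParseError(Exception):
        def __init__(self, message, location=None):
            self.message = message
            self.location = location

    def parseerrordetail(inst):
        if inst.location is not None:
            return b'at %d: %s' % (inst.location, inst.message)
        return inst.message

    print(parseerrordetail(ParseError(b'unexpected token', 7)))  # b'at 7: unexpected token'
    print(parseerrordetail(ParseError(b'empty query')))          # b'empty query'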
class alias(object): | class alias(object): | ||||
"""Parsed result of alias""" | """Parsed result of alias""" |
(b'Date', b'date'), | (b'Date', b'date'), | ||||
(b'Branch', b'branch'), | (b'Branch', b'branch'), | ||||
(b'Node ID', b'nodeid'), | (b'Node ID', b'nodeid'), | ||||
] | ] | ||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def extract(ui, fileobj): | def extract(ui, fileobj): | ||||
'''extract patch from data read from fileobj. | """extract patch from data read from fileobj. | ||||
patch can be a normal patch or contained in an email message. | patch can be a normal patch or contained in an email message. | ||||
return a dictionary. Standard keys are: | return a dictionary. Standard keys are: | ||||
- filename, | - filename, | ||||
- message, | - message, | ||||
- user, | - user, | ||||
- date, | - date, | ||||
- branch, | - branch, | ||||
- node, | - node, | ||||
- p1, | - p1, | ||||
- p2. | - p2. | ||||
Any item can be missing from the dictionary. If filename is missing, | Any item can be missing from the dictionary. If filename is missing, | ||||
fileobj did not contain a patch. Caller must unlink filename when done.''' | fileobj did not contain a patch. Caller must unlink filename when done.""" | ||||
fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-') | fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-') | ||||
tmpfp = os.fdopen(fd, 'wb') | tmpfp = os.fdopen(fd, 'wb') | ||||
try: | try: | ||||
yield _extract(ui, fileobj, tmpname, tmpfp) | yield _extract(ui, fileobj, tmpname, tmpfp) | ||||
finally: | finally: | ||||
tmpfp.close() | tmpfp.close() | ||||
os.unlink(tmpname) | os.unlink(tmpname) | ||||
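Standalone sketch of the cleanup pattern extract() uses: the temp file outlives the yield and is always removed in the finally block (the _extract() call is replaced by a plain copy):

    import contextlib
    import io
    import os
    import tempfile

    @contextlib.contextmanager
    def extract_sketch(fileobj):
        fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
        tmpfp = os.fdopen(fd, 'wb')
        try:
            tmpfp.write(fileobj.read())  # stand-in for the real _extract()
            tmpfp.flush()
            yield {'filename': tmpname}
        finally:
            tmpfp.close()
            os.unlink(tmpname)

    with extract_sketch(io.BytesIO(b'diff --git a/f b/f\n')) as data:
        assert os.path.exists(data['filename'])   # alive inside the block
    assert not os.path.exists(data['filename'])   # cleaned up on exit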
def close(self): | def close(self): | ||||
if self.dirty: | if self.dirty: | ||||
self.writelines(self.fname, self.lines, self.mode) | self.writelines(self.fname, self.lines, self.mode) | ||||
self.write_rej() | self.write_rej() | ||||
return len(self.rej) | return len(self.rej) | ||||
class header(object): | class header(object): | ||||
"""patch header | """patch header""" | ||||
""" | |||||
diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$') | diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$') | ||||
diff_re = re.compile(b'diff -r .* (.*)$') | diff_re = re.compile(b'diff -r .* (.*)$') | ||||
allhunks_re = re.compile(b'(?:index|deleted file) ') | allhunks_re = re.compile(b'(?:index|deleted file) ') | ||||
pretty_re = re.compile(b'(?:new file|deleted file) ') | pretty_re = re.compile(b'(?:new file|deleted file) ') | ||||
special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ') | special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ') | ||||
newfile_re = re.compile(b'(?:new file|copy to|rename to)') | newfile_re = re.compile(b'(?:new file|copy to|rename to)') | ||||
b'unhandled transition: %s -> %s' % (state, newstate) | b'unhandled transition: %s -> %s' % (state, newstate) | ||||
) | ) | ||||
state = newstate | state = newstate | ||||
del fp | del fp | ||||
return p.finished() | return p.finished() | ||||
def pathtransform(path, strip, prefix): | def pathtransform(path, strip, prefix): | ||||
'''turn a path from a patch into a path suitable for the repository | """turn a path from a patch into a path suitable for the repository | ||||
prefix, if not empty, is expected to be normalized with a / at the end. | prefix, if not empty, is expected to be normalized with a / at the end. | ||||
Returns (stripped components, path in repository). | Returns (stripped components, path in repository). | ||||
>>> pathtransform(b'a/b/c', 0, b'') | >>> pathtransform(b'a/b/c', 0, b'') | ||||
('', 'a/b/c') | ('', 'a/b/c') | ||||
>>> pathtransform(b' a/b/c ', 0, b'') | >>> pathtransform(b' a/b/c ', 0, b'') | ||||
('', ' a/b/c') | ('', ' a/b/c') | ||||
>>> pathtransform(b' a/b/c ', 2, b'') | >>> pathtransform(b' a/b/c ', 2, b'') | ||||
('a/b/', 'c') | ('a/b/', 'c') | ||||
>>> pathtransform(b'a/b/c', 0, b'd/e/') | >>> pathtransform(b'a/b/c', 0, b'd/e/') | ||||
('', 'd/e/a/b/c') | ('', 'd/e/a/b/c') | ||||
>>> pathtransform(b' a//b/c ', 2, b'd/e/') | >>> pathtransform(b' a//b/c ', 2, b'd/e/') | ||||
('a//b/', 'd/e/c') | ('a//b/', 'd/e/c') | ||||
>>> pathtransform(b'a/b/c', 3, b'') | >>> pathtransform(b'a/b/c', 3, b'') | ||||
Traceback (most recent call last): | Traceback (most recent call last): | ||||
PatchError: unable to strip away 1 of 3 dirs from a/b/c | PatchError: unable to strip away 1 of 3 dirs from a/b/c | ||||
''' | """ | ||||
pathlen = len(path) | pathlen = len(path) | ||||
i = 0 | i = 0 | ||||
if strip == 0: | if strip == 0: | ||||
return b'', prefix + path.rstrip() | return b'', prefix + path.rstrip() | ||||
count = strip | count = strip | ||||
while count > 0: | while count > 0: | ||||
i = path.find(b'/', i) | i = path.find(b'/', i) | ||||
if i == -1: | if i == -1: | ||||
changes=None, | changes=None, | ||||
opts=None, | opts=None, | ||||
losedatafn=None, | losedatafn=None, | ||||
pathfn=None, | pathfn=None, | ||||
copy=None, | copy=None, | ||||
copysourcematch=None, | copysourcematch=None, | ||||
hunksfilterfn=None, | hunksfilterfn=None, | ||||
): | ): | ||||
'''yields diff of changes to files between two nodes, or node and | """yields diff of changes to files between two nodes, or node and | ||||
working directory. | working directory. | ||||
if node1 is None, use first dirstate parent instead. | if node1 is None, use first dirstate parent instead. | ||||
if node2 is None, compare node1 with working directory. | if node2 is None, compare node1 with working directory. | ||||
losedatafn(**kwarg) is a callable run when opts.upgrade=True and | losedatafn(**kwarg) is a callable run when opts.upgrade=True and | ||||
every time some change cannot be represented with the current | every time some change cannot be represented with the current | ||||
patch format. Return False to upgrade to git patch format, True to | patch format. Return False to upgrade to git patch format, True to | ||||
copy, if not empty, should contain mappings {dst@y: src@x} of copy | copy, if not empty, should contain mappings {dst@y: src@x} of copy | ||||
information. | information. | ||||
if copysourcematch is not None, then copy sources will be filtered by this | if copysourcematch is not None, then copy sources will be filtered by this | ||||
matcher | matcher | ||||
hunksfilterfn, if not None, should be a function taking a filectx and | hunksfilterfn, if not None, should be a function taking a filectx and | ||||
hunks generator that may yield filtered hunks. | hunks generator that may yield filtered hunks. | ||||
''' | """ | ||||
if not node1 and not node2: | if not node1 and not node2: | ||||
node1 = repo.dirstate.p1() | node1 = repo.dirstate.p1() | ||||
ctx1 = repo[node1] | ctx1 = repo[node1] | ||||
ctx2 = repo[node2] | ctx2 = repo[node2] | ||||
for fctx1, fctx2, hdr, hunks in diffhunks( | for fctx1, fctx2, hdr, hunks in diffhunks( | ||||
repo, | repo, | ||||
def diffui(*args, **kw): | def diffui(*args, **kw): | ||||
'''like diff(), but yields 2-tuples of (output, label) for ui.write()''' | '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' | ||||
return difflabel(diff, *args, **kw) | return difflabel(diff, *args, **kw) | ||||
def _filepairs(modified, added, removed, copy, opts): | def _filepairs(modified, added, removed, copy, opts): | ||||
'''generates tuples (f1, f2, copyop), where f1 is the name of the file | """generates tuples (f1, f2, copyop), where f1 is the name of the file | ||||
before and f2 is the name after. For added files, f1 will be None, | before and f2 is the name after. For added files, f1 will be None, | ||||
and for removed files, f2 will be None. copyop may be set to None, 'copy' | and for removed files, f2 will be None. copyop may be set to None, 'copy' | ||||
or 'rename' (the latter two only if opts.git is set).''' | or 'rename' (the latter two only if opts.git is set).""" | ||||
gone = set() | gone = set() | ||||
copyto = {v: k for k, v in copy.items()} | copyto = {v: k for k, v in copy.items()} | ||||
addedset, removedset = set(added), set(removed) | addedset, removedset = set(added), set(removed) | ||||
for f in sorted(modified + added + removed): | for f in sorted(modified + added + removed): | ||||
copyop = None | copyop = None | ||||
added, | added, | ||||
removed, | removed, | ||||
copy, | copy, | ||||
getfilectx, | getfilectx, | ||||
opts, | opts, | ||||
losedatafn, | losedatafn, | ||||
pathfn, | pathfn, | ||||
): | ): | ||||
'''given input data, generate a diff and yield it in blocks | """given input data, generate a diff and yield it in blocks | ||||
If generating a diff would lose data like flags or binary data and | If generating a diff would lose data like flags or binary data and | ||||
losedatafn is not None, it will be called. | losedatafn is not None, it will be called. | ||||
pathfn is applied to every path in the diff output. | pathfn is applied to every path in the diff output. | ||||
''' | """ | ||||
if opts.noprefix: | if opts.noprefix: | ||||
aprefix = bprefix = b'' | aprefix = bprefix = b'' | ||||
else: | else: | ||||
aprefix = b'a/' | aprefix = b'a/' | ||||
bprefix = b'b/' | bprefix = b'b/' | ||||
def diffline(f, revs): | def diffline(f, revs): | ||||
content2 = fctx2.data() | content2 = fctx2.data() | ||||
data1 = (ctx1, fctx1, path1, flag1, content1, date1) | data1 = (ctx1, fctx1, path1, flag1, content1, date1) | ||||
data2 = (ctx2, fctx2, path2, flag2, content2, date2) | data2 = (ctx2, fctx2, path2, flag2, content2, date2) | ||||
yield diffcontent(data1, data2, header, binary, opts) | yield diffcontent(data1, data2, header, binary, opts) | ||||
def diffcontent(data1, data2, header, binary, opts): | def diffcontent(data1, data2, header, binary, opts): | ||||
""" diffs two versions of a file. | """diffs two versions of a file. | ||||
data1 and data2 are tuples containing: | data1 and data2 are tuples containing: | ||||
* ctx: changeset for the file | * ctx: changeset for the file | ||||
* fctx: file context for that file | * fctx: file context for that file | ||||
* path1: name of the file | * path1: name of the file | ||||
* flag: flags of the file | * flag: flags of the file | ||||
* content: full content of the file (can be null in case of binary) | * content: full content of the file (can be null in case of binary) | ||||
_(b' %d files changed, %d insertions(+), %d deletions(-)\n') | _(b' %d files changed, %d insertions(+), %d deletions(-)\n') | ||||
% (len(stats), totaladds, totalremoves) | % (len(stats), totaladds, totalremoves) | ||||
) | ) | ||||
return b''.join(output) | return b''.join(output) | ||||
def diffstatui(*args, **kw): | def diffstatui(*args, **kw): | ||||
'''like diffstat(), but yields 2-tuples of (output, label) for | """like diffstat(), but yields 2-tuples of (output, label) for | ||||
ui.write() | ui.write() | ||||
''' | """ | ||||
for line in diffstat(*args, **kw).splitlines(): | for line in diffstat(*args, **kw).splitlines(): | ||||
if line and line[-1] in b'+-': | if line and line[-1] in b'+-': | ||||
name, graph = line.rsplit(b' ', 1) | name, graph = line.rsplit(b' ', 1) | ||||
yield (name + b' ', b'') | yield (name + b' ', b'') | ||||
m = re.search(br'\++', graph) | m = re.search(br'\++', graph) | ||||
if m: | if m: | ||||
yield (m.group(0), b'diffstat.inserted') | yield (m.group(0), b'diffstat.inserted') | ||||
m = re.search(br'-+', graph) | m = re.search(br'-+', graph) | ||||
if m: | if m: | ||||
yield (m.group(0), b'diffstat.deleted') | yield (m.group(0), b'diffstat.deleted') | ||||
else: | else: | ||||
yield (line, b'') | yield (line, b'') | ||||
yield (b'\n', b'') | yield (b'\n', b'') |
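One diffstat line run through the labelling logic above, showing how the graph suffix splits into separately labelled runs (the input line is fabricated):

    import re

    line = b' foo.py |  4 +++-'
    name, graph = line.rsplit(b' ', 1)
    print((name + b' ', b''))                      # unlabelled filename part
    m = re.search(br'\++', graph)
    if m:
        print((m.group(0), b'diffstat.inserted'))  # (b'+++', ...)
    m = re.search(br'-+', graph)
    if m:
        print((m.group(0), b'diffstat.deleted'))   # (b'-', ...)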
parsers = policy.importmod('parsers') | parsers = policy.importmod('parsers') | ||||
def _lowerclean(s): | def _lowerclean(s): | ||||
return encoding.hfsignoreclean(s.lower()) | return encoding.hfsignoreclean(s.lower()) | ||||
class pathauditor(object): | class pathauditor(object): | ||||
'''ensure that a filesystem path contains no banned components. | """ensure that a filesystem path contains no banned components. | ||||
the following properties of a path are checked: | the following properties of a path are checked: | ||||
- ends with a directory separator | - ends with a directory separator | ||||
- under top-level .hg | - under top-level .hg | ||||
- starts at the root of a windows drive | - starts at the root of a windows drive | ||||
- contains ".." | - contains ".." | ||||
More checks are also done about the file system state: | More checks are also done about the file system state: | ||||
- traverses a symlink (e.g. a/symlink_here/b) | - traverses a symlink (e.g. a/symlink_here/b) | ||||
- inside a nested repository (a callback can be used to approve | - inside a nested repository (a callback can be used to approve | ||||
some nested repositories, e.g., subrepositories) | some nested repositories, e.g., subrepositories) | ||||
The file system checks are only done when 'realfs' is set to True (the | The file system checks are only done when 'realfs' is set to True (the | ||||
default). They should be disabled when we are auditing paths for operations | default). They should be disabled when we are auditing paths for operations | ||||
on stored history. | on stored history. | ||||
If 'cached' is set to True, audited paths and sub-directories are cached. | If 'cached' is set to True, audited paths and sub-directories are cached. | ||||
Be careful to not keep the cache of unmanaged directories for long because | Be careful to not keep the cache of unmanaged directories for long because | ||||
audited paths may be replaced with symlinks. | audited paths may be replaced with symlinks. | ||||
''' | """ | ||||
def __init__(self, root, callback=None, realfs=True, cached=False): | def __init__(self, root, callback=None, realfs=True, cached=False): | ||||
self.audited = set() | self.audited = set() | ||||
self.auditeddir = set() | self.auditeddir = set() | ||||
self.root = root | self.root = root | ||||
self._realfs = realfs | self._realfs = realfs | ||||
self._cached = cached | self._cached = cached | ||||
self.callback = callback | self.callback = callback | ||||
if os.path.lexists(root) and not util.fscasesensitive(root): | if os.path.lexists(root) and not util.fscasesensitive(root): | ||||
self.normcase = util.normcase | self.normcase = util.normcase | ||||
else: | else: | ||||
self.normcase = lambda x: x | self.normcase = lambda x: x | ||||
def __call__(self, path, mode=None): | def __call__(self, path, mode=None): | ||||
'''Check the relative path. | """Check the relative path. | ||||
path may contain a pattern (e.g. foodir/**.txt)''' | path may contain a pattern (e.g. foodir/**.txt)""" | ||||
path = util.localpath(path) | path = util.localpath(path) | ||||
normpath = self.normcase(path) | normpath = self.normcase(path) | ||||
if normpath in self.audited: | if normpath in self.audited: | ||||
return | return | ||||
# AIX ignores "/" at end of path, others raise EISDIR. | # AIX ignores "/" at end of path, others raise EISDIR. | ||||
if util.endswithsep(path): | if util.endswithsep(path): | ||||
raise error.Abort(_(b"path ends in directory separator: %s") % path) | raise error.Abort(_(b"path ends in directory separator: %s") % path) | ||||
yield | yield | ||||
finally: | finally: | ||||
self.audited.clear() | self.audited.clear() | ||||
self.auditeddir.clear() | self.auditeddir.clear() | ||||
self._cached = False | self._cached = False | ||||
def canonpath(root, cwd, myname, auditor=None): | def canonpath(root, cwd, myname, auditor=None): | ||||
'''return the canonical path of myname, given cwd and root | """return the canonical path of myname, given cwd and root | ||||
>>> def check(root, cwd, myname): | >>> def check(root, cwd, myname): | ||||
... a = pathauditor(root, realfs=False) | ... a = pathauditor(root, realfs=False) | ||||
... try: | ... try: | ||||
... return canonpath(root, cwd, myname, a) | ... return canonpath(root, cwd, myname, a) | ||||
... except error.Abort: | ... except error.Abort: | ||||
... return 'aborted' | ... return 'aborted' | ||||
>>> def unixonly(root, cwd, myname, expected='aborted'): | >>> def unixonly(root, cwd, myname, expected='aborted'): | ||||
>>> unixonly(b'/repo', b'/', b'filename') | >>> unixonly(b'/repo', b'/', b'filename') | ||||
'aborted' | 'aborted' | ||||
>>> unixonly(b'/repo', b'/', b'repo/filename', b'filename') | >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename') | ||||
'filename' | 'filename' | ||||
>>> unixonly(b'/repo', b'/repo', b'filename', b'filename') | >>> unixonly(b'/repo', b'/repo', b'filename', b'filename') | ||||
'filename' | 'filename' | ||||
>>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename') | >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename') | ||||
'subdir/filename' | 'subdir/filename' | ||||
''' | """ | ||||
if util.endswithsep(root): | if util.endswithsep(root): | ||||
rootsep = root | rootsep = root | ||||
else: | else: | ||||
rootsep = root + pycompat.ossep | rootsep = root + pycompat.ossep | ||||
name = myname | name = myname | ||||
if not os.path.isabs(name): | if not os.path.isabs(name): | ||||
name = os.path.join(root, cwd, name) | name = os.path.join(root, cwd, name) | ||||
name = os.path.normpath(name) | name = os.path.normpath(name) | ||||
pass | pass | ||||
raise error.Abort( | raise error.Abort( | ||||
_(b"%s not under root '%s'") % (myname, root), hint=hint | _(b"%s not under root '%s'") % (myname, root), hint=hint | ||||
) | ) | ||||
def normasprefix(path): | def normasprefix(path): | ||||
'''normalize the specified path as path prefix | """normalize the specified path as path prefix | ||||
Returned value can be used safely for "p.startswith(prefix)", | Returned value can be used safely for "p.startswith(prefix)", | ||||
"p[len(prefix):]", and so on. | "p[len(prefix):]", and so on. | ||||
For efficiency, this expects "path" argument to be already | For efficiency, this expects "path" argument to be already | ||||
normalized by "os.path.normpath", "os.path.realpath", and so on. | normalized by "os.path.normpath", "os.path.realpath", and so on. | ||||
See also issue3033 for detail about need of this function. | See also issue3033 for detail about need of this function. | ||||
>>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/') | >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/') | ||||
'/foo/bar/' | '/foo/bar/' | ||||
>>> normasprefix(b'/').replace(pycompat.ossep, b'/') | >>> normasprefix(b'/').replace(pycompat.ossep, b'/') | ||||
'/' | '/' | ||||
''' | """ | ||||
d, p = os.path.splitdrive(path) | d, p = os.path.splitdrive(path) | ||||
if len(p) != len(pycompat.ossep): | if len(p) != len(pycompat.ossep): | ||||
return path + pycompat.ossep | return path + pycompat.ossep | ||||
else: | else: | ||||
return path | return path | ||||
def finddirs(path): | def finddirs(path): | ||||
pos = path.rfind(b'/') | pos = path.rfind(b'/') | ||||
while pos != -1: | while pos != -1: | ||||
yield path[:pos] | yield path[:pos] | ||||
pos = path.rfind(b'/', 0, pos) | pos = path.rfind(b'/', 0, pos) | ||||
yield b'' | yield b'' | ||||
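finddirs() yields the parent directories from deepest to the root marker b''; for example:

    def finddirs(path):
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)
        yield b''

    print(list(finddirs(b'a/b/c.txt')))  # [b'a/b', b'a', b'']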
class dirs(object): | class dirs(object): | ||||
'''a multiset of directory names from a set of file paths''' | '''a multiset of directory names from a set of file paths''' | ||||
def __init__(self, map, skip=None): | def __init__(self, map, skip=None): | ||||
''' | """ | ||||
a dict map indicates a dirstate while a list indicates a manifest | a dict map indicates a dirstate while a list indicates a manifest | ||||
''' | """ | ||||
self._dirs = {} | self._dirs = {} | ||||
addpath = self.addpath | addpath = self.addpath | ||||
if isinstance(map, dict) and skip is not None: | if isinstance(map, dict) and skip is not None: | ||||
for f, s in pycompat.iteritems(map): | for f, s in pycompat.iteritems(map): | ||||
if s[0] != skip: | if s[0] != skip: | ||||
addpath(f) | addpath(f) | ||||
elif skip is not None: | elif skip is not None: | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( |
else: | else: | ||||
# The underlying file object seeks as required in Python 3: | # The underlying file object seeks as required in Python 3: | ||||
# https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474 | # https://github.com/python/cpython/blob/v3.7.3/Modules/_io/fileio.c#L474 | ||||
posixfile = open | posixfile = open | ||||
def split(p): | def split(p): | ||||
'''Same as posixpath.split, but faster | """Same as posixpath.split, but faster | ||||
>>> import posixpath | >>> import posixpath | ||||
>>> for f in [b'/absolute/path/to/file', | >>> for f in [b'/absolute/path/to/file', | ||||
... b'relative/path/to/file', | ... b'relative/path/to/file', | ||||
... b'file_alone', | ... b'file_alone', | ||||
... b'path/to/directory/', | ... b'path/to/directory/', | ||||
... b'/multiple/path//separators', | ... b'/multiple/path//separators', | ||||
... b'/file_at_root', | ... b'/file_at_root', | ||||
... b'///multiple_leading_separators_at_root', | ... b'///multiple_leading_separators_at_root', | ||||
... b'']: | ... b'']: | ||||
... assert split(f) == posixpath.split(f), f | ... assert split(f) == posixpath.split(f), f | ||||
''' | """ | ||||
ht = p.rsplit(b'/', 1) | ht = p.rsplit(b'/', 1) | ||||
if len(ht) == 1: | if len(ht) == 1: | ||||
return b'', p | return b'', p | ||||
nh = ht[0].rstrip(b'/') | nh = ht[0].rstrip(b'/') | ||||
if nh: | if nh: | ||||
return nh, ht[1] | return nh, ht[1] | ||||
return ht[0] + b'/', ht[1] | return ht[0] + b'/', ht[1] | ||||
# and obey umask. | # and obey umask. | ||||
os.chmod(f, s | (s & 0o444) >> 2 & ~umask) | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) | ||||
elif not x and sx: | elif not x and sx: | ||||
# Turn off all +x bits | # Turn off all +x bits | ||||
os.chmod(f, s & 0o666) | os.chmod(f, s & 0o666) | ||||
def copymode(src, dst, mode=None, enforcewritable=False): | def copymode(src, dst, mode=None, enforcewritable=False): | ||||
'''Copy the file mode from the file at path src to dst. | """Copy the file mode from the file at path src to dst. | ||||
If src doesn't exist, we're using mode instead. If mode is None, we're | If src doesn't exist, we're using mode instead. If mode is None, we're | ||||
using umask.''' | using umask.""" | ||||
try: | try: | ||||
st_mode = os.lstat(src).st_mode & 0o777 | st_mode = os.lstat(src).st_mode & 0o777 | ||||
except OSError as inst: | except OSError as inst: | ||||
if inst.errno != errno.ENOENT: | if inst.errno != errno.ENOENT: | ||||
raise | raise | ||||
st_mode = mode | st_mode = mode | ||||
if st_mode is None: | if st_mode is None: | ||||
st_mode = ~umask | st_mode = ~umask | ||||
except OSError as inst: | except OSError as inst: | ||||
# sshfs might report failure while successfully creating the link | # sshfs might report failure while successfully creating the link | ||||
if inst.errno == errno.EIO and os.path.exists(name): | if inst.errno == errno.EIO and os.path.exists(name): | ||||
unlink(name) | unlink(name) | ||||
return False | return False | ||||
def checkosfilename(path): | def checkosfilename(path): | ||||
'''Check that the base-relative path is a valid filename on this platform. | """Check that the base-relative path is a valid filename on this platform. | ||||
Returns None if the path is ok, or a UI string describing the problem.''' | Returns None if the path is ok, or a UI string describing the problem.""" | ||||
return None # on posix platforms, every path is ok | return None # on posix platforms, every path is ok | ||||
def getfsmountpoint(dirpath): | def getfsmountpoint(dirpath): | ||||
'''Get the filesystem mount point from a directory (best-effort) | """Get the filesystem mount point from a directory (best-effort) | ||||
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | ||||
''' | """ | ||||
return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) | ||||
def getfstype(dirpath): | def getfstype(dirpath): | ||||
'''Get the filesystem type name from a directory (best-effort) | """Get the filesystem type name from a directory (best-effort) | ||||
Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | ||||
''' | """ | ||||
return getattr(osutil, 'getfstype', lambda x: None)(dirpath) | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) | ||||
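The getattr(osutil, ..., lambda x: None) pattern above degrades gracefully when the platform-specific module does not provide the helper; a generic sketch with a stubbed module object:

    class _osutil_stub(object):
        pass  # imagine a platform module that may or may not define getfstype

    osutil = _osutil_stub()
    # falls back to the no-op lambda because the stub lacks the attribute
    print(getattr(osutil, 'getfstype', lambda x: None)(b'/'))  # None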
def setbinary(fd): | def setbinary(fd): | ||||
pass | pass | ||||
def pconvert(path): | def pconvert(path): | ||||
# what normcase does to ASCII strings | # what normcase does to ASCII strings | ||||
normcasespec = encoding.normcasespecs.lower | normcasespec = encoding.normcasespecs.lower | ||||
# fallback normcase function for non-ASCII strings | # fallback normcase function for non-ASCII strings | ||||
normcasefallback = normcase | normcasefallback = normcase | ||||
if pycompat.isdarwin: | if pycompat.isdarwin: | ||||
def normcase(path): | def normcase(path): | ||||
''' | """ | ||||
Normalize a filename for OS X-compatible comparison: | Normalize a filename for OS X-compatible comparison: | ||||
- escape-encode invalid characters | - escape-encode invalid characters | ||||
- decompose to NFD | - decompose to NFD | ||||
- lowercase | - lowercase | ||||
- omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] | - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff] | ||||
>>> normcase(b'UPPER') | >>> normcase(b'UPPER') | ||||
'upper' | 'upper' | ||||
>>> normcase(b'Caf\\xc3\\xa9') | >>> normcase(b'Caf\\xc3\\xa9') | ||||
'cafe\\xcc\\x81' | 'cafe\\xcc\\x81' | ||||
>>> normcase(b'\\xc3\\x89') | >>> normcase(b'\\xc3\\x89') | ||||
'e\\xcc\\x81' | 'e\\xcc\\x81' | ||||
>>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 | ||||
'%b8%ca%c3\\xca\\xbe%c8.jpg' | '%b8%ca%c3\\xca\\xbe%c8.jpg' | ||||
''' | """ | ||||
try: | try: | ||||
return encoding.asciilower(path) # exception for non-ASCII | return encoding.asciilower(path) # exception for non-ASCII | ||||
except UnicodeDecodeError: | except UnicodeDecodeError: | ||||
return normcasefallback(path) | return normcasefallback(path) | ||||
normcasespec = encoding.normcasespecs.lower | normcasespec = encoding.normcasespecs.lower | ||||
if pycompat.sysplatform == b'cygwin': | if pycompat.sysplatform == b'cygwin': | ||||
# workaround for cygwin, in which mount point part of path is | # workaround for cygwin, in which mount point part of path is | ||||
# treated as case sensitive, even though underlying NTFS is case | # treated as case sensitive, even though underlying NTFS is case | ||||
# insensitive. | # insensitive. | ||||
# default mount points | # default mount points | ||||
cygwinmountpoints = sorted( | cygwinmountpoints = sorted( | ||||
[b"/usr/bin", b"/usr/lib", b"/cygdrive",], reverse=True | [ | ||||
b"/usr/bin", | |||||
b"/usr/lib", | |||||
b"/cygdrive", | |||||
], | |||||
reverse=True, | |||||
) | ) | ||||
# use uppercasing for normcase, the same as the NTFS workaround | # use uppercasing for normcase, the same as the NTFS workaround | ||||
def normcase(path): | def normcase(path): | ||||
pathlen = len(path) | pathlen = len(path) | ||||
if (pathlen == 0) or (path[0] != pycompat.ossep): | if (pathlen == 0) or (path[0] != pycompat.ossep): | ||||
# treat as relative | # treat as relative | ||||
return encoding.upper(path) | return encoding.upper(path) | ||||
def isowner(st): | def isowner(st): | ||||
"""Return True if the stat object st is from the current user.""" | """Return True if the stat object st is from the current user.""" | ||||
return st.st_uid == os.getuid() | return st.st_uid == os.getuid() | ||||
def findexe(command): | def findexe(command): | ||||
'''Find executable for command searching like which does. | """Find executable for command searching like which does. | ||||
If command is a basename then PATH is searched for command. | If command is a basename then PATH is searched for command. | ||||
PATH isn't searched if command is an absolute or relative path. | PATH isn't searched if command is an absolute or relative path. | ||||
If command isn't found None is returned.''' | If command isn't found None is returned.""" | ||||
if pycompat.sysplatform == b'OpenVMS': | if pycompat.sysplatform == b'OpenVMS': | ||||
return command | return command | ||||
def findexisting(executable): | def findexisting(executable): | ||||
b'Will return the executable if the file exists' | b'Will return the executable if the file exists' | ||||
if os.path.isfile(executable) and os.access(executable, os.X_OK): | if os.path.isfile(executable) and os.access(executable, os.X_OK): | ||||
return executable | return executable | ||||
return None | return None | ||||
def setsignalhandler(): | def setsignalhandler(): | ||||
pass | pass | ||||
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | ||||
def statfiles(files): | def statfiles(files): | ||||
'''Stat each file in files. Yield each stat, or None if a file does not | """Stat each file in files. Yield each stat, or None if a file does not | ||||
exist or has a type we don't care about.''' | exist or has a type we don't care about.""" | ||||
lstat = os.lstat | lstat = os.lstat | ||||
getkind = stat.S_IFMT | getkind = stat.S_IFMT | ||||
for nf in files: | for nf in files: | ||||
try: | try: | ||||
st = lstat(nf) | st = lstat(nf) | ||||
if getkind(st.st_mode) not in _wantedkinds: | if getkind(st.st_mode) not in _wantedkinds: | ||||
st = None | st = None | ||||
except OSError as err: | except OSError as err: |
# it's been long enough we should print anyway | # it's been long enough we should print anyway | ||||
or now - self.lastprint >= self.changedelay | or now - self.lastprint >= self.changedelay | ||||
): | ): | ||||
return True | return True | ||||
else: | else: | ||||
return False | return False | ||||
def _calibrateestimate(self, topic, now, pos): | def _calibrateestimate(self, topic, now, pos): | ||||
'''Adjust starttimes and startvals for topic so ETA works better | """Adjust starttimes and startvals for topic so ETA works better | ||||
If progress is non-linear (e.g. gets much slower in the last minute), | If progress is non-linear (e.g. gets much slower in the last minute), | ||||
it's friendlier to use only a recent time span for ETA and speed | it's friendlier to use only a recent time span for ETA and speed | ||||
calculation. | calculation. | ||||
[======================================> ] | [======================================> ] | ||||
^^^^^^^ | ^^^^^^^ | ||||
estimateinterval, only use this for estimation | estimateinterval, only use this for estimation | ||||
''' | """ | ||||
interval = self.estimateinterval | interval = self.estimateinterval | ||||
if interval <= 0: | if interval <= 0: | ||||
return | return | ||||
elapsed = now - self.starttimes[topic] | elapsed = now - self.starttimes[topic] | ||||
if elapsed > interval: | if elapsed > interval: | ||||
delta = pos - self.startvals[topic] | delta = pos - self.startvals[topic] | ||||
newdelta = delta * interval / elapsed | newdelta = delta * interval / elapsed | ||||
# If a stall happens temporarily, ETA could change dramatically | # If a stall happens temporarily, ETA could change dramatically |
try: | try: | ||||
s.decode('ascii') | s.decode('ascii') | ||||
return True | return True | ||||
except UnicodeDecodeError: | except UnicodeDecodeError: | ||||
return False | return False | ||||
def asciilower(s): | def asciilower(s): | ||||
'''convert a string to lowercase if ASCII | """convert a string to lowercase if ASCII | ||||
Raises UnicodeDecodeError if non-ASCII characters are found.''' | Raises UnicodeDecodeError if non-ASCII characters are found.""" | ||||
s.decode('ascii') | s.decode('ascii') | ||||
return s.lower() | return s.lower() | ||||
def asciiupper(s): | def asciiupper(s): | ||||
'''convert a string to uppercase if ASCII | """convert a string to uppercase if ASCII | ||||
Raises UnicodeDecodeError if non-ASCII characters are found.''' | Raises UnicodeDecodeError if non-ASCII characters are found.""" | ||||
s.decode('ascii') | s.decode('ascii') | ||||
return s.upper() | return s.upper() | ||||
_jsonmap = [] | _jsonmap = [] | ||||
_jsonmap.extend(b"\\u%04x" % x for x in range(32)) | _jsonmap.extend(b"\\u%04x" % x for x in range(32)) | ||||
_jsonmap.extend(pycompat.bytechr(x) for x in range(32, 127)) | _jsonmap.extend(pycompat.bytechr(x) for x in range(32, 127)) | ||||
_jsonmap.append(b'\\u007f') | _jsonmap.append(b'\\u007f') |
import struct | import struct | ||||
from .. import pycompat | from .. import pycompat | ||||
stringio = pycompat.bytesio | stringio = pycompat.bytesio | ||||
class mpatchError(Exception): | class mpatchError(Exception): | ||||
"""error raised when a delta cannot be decoded | """error raised when a delta cannot be decoded""" | ||||
""" | |||||
# This attempts to apply a series of patches in time proportional to | # This attempts to apply a series of patches in time proportional to | ||||
# the total size of the patches, rather than patches * len(text). This | # the total size of the patches, rather than patches * len(text). This | ||||
# means rather than shuffling strings around, we shuffle around | # means rather than shuffling strings around, we shuffle around | ||||
# pointers to fragments with fragment lists. | # pointers to fragments with fragment lists. | ||||
# | # | ||||
# When the fragment lists get too long, we collapse them. To do this | # When the fragment lists get too long, we collapse them. To do this |
if statmod.S_ISFIFO(mode): | if statmod.S_ISFIFO(mode): | ||||
return statmod.S_IFIFO | return statmod.S_IFIFO | ||||
if statmod.S_ISSOCK(mode): | if statmod.S_ISSOCK(mode): | ||||
return statmod.S_IFSOCK | return statmod.S_IFSOCK | ||||
return mode | return mode | ||||
def listdir(path, stat=False, skip=None): | def listdir(path, stat=False, skip=None): | ||||
'''listdir(path, stat=False) -> list_of_tuples | """listdir(path, stat=False) -> list_of_tuples | ||||
Return a sorted list containing information about the entries | Return a sorted list containing information about the entries | ||||
in the directory. | in the directory. | ||||
If stat is True, each element is a 3-tuple: | If stat is True, each element is a 3-tuple: | ||||
(name, type, stat object) | (name, type, stat object) | ||||
Otherwise, each element is a 2-tuple: | Otherwise, each element is a 2-tuple: | ||||
(name, type) | (name, type) | ||||
''' | """ | ||||
result = [] | result = [] | ||||
prefix = path | prefix = path | ||||
if not prefix.endswith(pycompat.ossep): | if not prefix.endswith(pycompat.ossep): | ||||
prefix += pycompat.ossep | prefix += pycompat.ossep | ||||
names = os.listdir(path) | names = os.listdir(path) | ||||
names.sort() | names.sort() | ||||
for fn in names: | for fn in names: | ||||
st = os.lstat(prefix + fn) | st = os.lstat(prefix + fn) | ||||
def _raiseioerror(name): | def _raiseioerror(name): | ||||
err = ctypes.WinError() | err = ctypes.WinError() | ||||
raise IOError( | raise IOError( | ||||
err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | ||||
) | ) | ||||
class posixfile(object): | class posixfile(object): | ||||
'''a file object aiming for POSIX-like semantics | """a file object aiming for POSIX-like semantics | ||||
CPython's open() returns a file that was opened *without* setting the | CPython's open() returns a file that was opened *without* setting the | ||||
_FILE_SHARE_DELETE flag, which causes rename and unlink to abort. | _FILE_SHARE_DELETE flag, which causes rename and unlink to abort. | ||||
This even happens if any hardlinked copy of the file is open. | This even happens if any hardlinked copy of the file is open. | ||||
We set _FILE_SHARE_DELETE here, so files opened with posixfile can be | We set _FILE_SHARE_DELETE here, so files opened with posixfile can be | ||||
renamed and deleted while they are held open. | renamed and deleted while they are held open. | ||||
Note that if a file opened with posixfile is unlinked, the file | Note that if a file opened with posixfile is unlinked, the file | ||||
remains but cannot be opened again or be recreated under the same name, | remains but cannot be opened again or be recreated under the same name, | ||||
until all reading processes have closed the file.''' | until all reading processes have closed the file.""" | ||||
def __init__(self, name, mode=b'r', bufsize=-1): | def __init__(self, name, mode=b'r', bufsize=-1): | ||||
if b'b' in mode: | if b'b' in mode: | ||||
flags = _O_BINARY | flags = _O_BINARY | ||||
else: | else: | ||||
flags = _O_TEXT | flags = _O_TEXT | ||||
m0 = mode[0:1] | m0 = mode[0:1] | ||||
def __iter__(self): | def __iter__(self): | ||||
return self._file | return self._file | ||||
def __getattr__(self, name): | def __getattr__(self, name): | ||||
return getattr(self._file, name) | return getattr(self._file, name) | ||||
def __setattr__(self, name, value): | def __setattr__(self, name, value): | ||||
'''mimics the read-only attributes of Python file objects | """mimics the read-only attributes of Python file objects | ||||
by raising 'TypeError: readonly attribute' if someone tries: | by raising 'TypeError: readonly attribute' if someone tries: | ||||
f = posixfile('foo.txt') | f = posixfile('foo.txt') | ||||
f.name = 'bla' | f.name = 'bla' | ||||
''' | """ | ||||
return self._file.__setattr__(name, value) | return self._file.__setattr__(name, value) | ||||
def __enter__(self): | def __enter__(self): | ||||
self._file.__enter__() | self._file.__enter__() | ||||
return self | return self | ||||
def __exit__(self, exc_type, exc_value, exc_tb): | def __exit__(self, exc_type, exc_value, exc_tb): | ||||
return self._file.__exit__(exc_type, exc_value, exc_tb) | return self._file.__exit__(exc_type, exc_value, exc_tb) |
def parse_index2(data, inline): | def parse_index2(data, inline): | ||||
if not inline: | if not inline: | ||||
return IndexObject(data), None | return IndexObject(data), None | ||||
return InlinedIndexObject(data, inline), (0, data) | return InlinedIndexObject(data, inline), (0, data) | ||||
def parse_index_devel_nodemap(data, inline): | def parse_index_devel_nodemap(data, inline): | ||||
"""like parse_index2, but alway return a PersistentNodeMapIndexObject | """like parse_index2, but alway return a PersistentNodeMapIndexObject""" | ||||
""" | |||||
return PersistentNodeMapIndexObject(data), None | return PersistentNodeMapIndexObject(data), None | ||||
def parse_dirstate(dmap, copymap, st): | def parse_dirstate(dmap, copymap, st): | ||||
parents = [st[:20], st[20:40]] | parents = [st[:20], st[20:40]] | ||||
# dereference fields so they will be local in loop | # dereference fields so they will be local in loop | ||||
format = b">cllll" | format = b">cllll" | ||||
e_size = struct.calcsize(format) | e_size = struct.calcsize(format) |
join = os.path.join | join = os.path.join | ||||
return sorted( | return sorted( | ||||
join(p, f) for f, k in util.listdir(p) if f.endswith(b'.rc') | join(p, f) for f, k in util.listdir(p) if f.endswith(b'.rc') | ||||
) | ) | ||||
return [p] | return [p] | ||||
def envrcitems(env=None): | def envrcitems(env=None): | ||||
'''Return [(section, name, value, source)] config items. | """Return [(section, name, value, source)] config items. | ||||
The config items are extracted from environment variables specified by env, | The config items are extracted from environment variables specified by env, | ||||
used to override systemrc, but not userrc. | used to override systemrc, but not userrc. | ||||
If env is not provided, encoding.environ will be used. | If env is not provided, encoding.environ will be used. | ||||
''' | """ | ||||
if env is None: | if env is None: | ||||
env = encoding.environ | env = encoding.environ | ||||
checklist = [ | checklist = [ | ||||
(b'EDITOR', b'ui', b'editor'), | (b'EDITOR', b'ui', b'editor'), | ||||
(b'VISUAL', b'ui', b'editor'), | (b'VISUAL', b'ui', b'editor'), | ||||
(b'PAGER', b'pager', b'pager'), | (b'PAGER', b'pager', b'pager'), | ||||
] | ] | ||||
result = [] | result = [] | ||||
(b'mercurial.defaultrc', r) | (b'mercurial.defaultrc', r) | ||||
for r in sorted(rsrcs) | for r in sorted(rsrcs) | ||||
if resourceutil.is_resource(b'mercurial.defaultrc', r) | if resourceutil.is_resource(b'mercurial.defaultrc', r) | ||||
and r.endswith(b'.rc') | and r.endswith(b'.rc') | ||||
] | ] | ||||
def rccomponents(): | def rccomponents(): | ||||
'''return an ordered [(type, obj)] list describing where to load configs. | """return an ordered [(type, obj)] list describing where to load configs. | ||||
Respect $HGRCPATH. If $HGRCPATH is empty, only .hg/hgrc of the current repo | Respect $HGRCPATH. If $HGRCPATH is empty, only .hg/hgrc of the current repo | ||||
is used. If $HGRCPATH is not set, the platform default will be used. | is used. If $HGRCPATH is not set, the platform default will be used. | ||||
If a directory is provided, *.rc files under it will be used. | If a directory is provided, *.rc files under it will be used. | ||||
type could be either 'path', 'items' or 'resource'. If type is 'path', | type could be either 'path', 'items' or 'resource'. If type is 'path', | ||||
obj is a string, and is the config file path. If type is 'items', obj is a | obj is a string, and is the config file path. If type is 'items', obj is a | ||||
list of (section, name, value, source) that should fill the config directly. | list of (section, name, value, source) that should fill the config directly. | ||||
If type is 'resource', obj is a tuple of (package name, resource name). | If type is 'resource', obj is a tuple of (package name, resource name). | ||||
''' | """ | ||||
envrc = (b'items', envrcitems()) | envrc = (b'items', envrcitems()) | ||||
if b'HGRCPATH' in encoding.environ: | if b'HGRCPATH' in encoding.environ: | ||||
# assume HGRCPATH is all about user configs so environments can be | # assume HGRCPATH is all about user configs so environments can be | ||||
# overridden. | # overridden. | ||||
_rccomponents = [envrc] | _rccomponents = [envrc] | ||||
for p in encoding.environ[b'HGRCPATH'].split(pycompat.ospathsep): | for p in encoding.environ[b'HGRCPATH'].split(pycompat.ospathsep): | ||||
if not p: | if not p: | ||||
continue | continue | ||||
_rccomponents.extend((b'path', p) for p in _expandrcpath(p)) | _rccomponents.extend((b'path', p) for p in _expandrcpath(p)) | ||||
else: | else: | ||||
_rccomponents = [(b'resource', r) for r in default_rc_resources()] | _rccomponents = [(b'resource', r) for r in default_rc_resources()] | ||||
normpaths = lambda paths: [ | normpaths = lambda paths: [ | ||||
(b'path', os.path.normpath(p)) for p in paths | (b'path', os.path.normpath(p)) for p in paths | ||||
] | ] | ||||
_rccomponents.extend(normpaths(systemrcpath())) | _rccomponents.extend(normpaths(systemrcpath())) | ||||
_rccomponents.append(envrc) | _rccomponents.append(envrc) | ||||
_rccomponents.extend(normpaths(userrcpath())) | _rccomponents.extend(normpaths(userrcpath())) | ||||
return _rccomponents | return _rccomponents | ||||
def defaultpagerenv(): | def defaultpagerenv(): | ||||
'''return a dict of default environment variables and their values, | """return a dict of default environment variables and their values, | ||||
intended to be set before starting a pager. | intended to be set before starting a pager. | ||||
''' | """ | ||||
return {b'LESS': b'FRX', b'LV': b'-c'} | return {b'LESS': b'FRX', b'LV': b'-c'} | ||||
def use_repo_hgrc(): | def use_repo_hgrc(): | ||||
"""True if repositories `.hg/hgrc` config should be read""" | """True if repositories `.hg/hgrc` config should be read""" | ||||
return b'HGRCSKIPREPO' not in encoding.environ | return b'HGRCSKIPREPO' not in encoding.environ |
if dups: | if dups: | ||||
msg = b'duplicate registration for names: "%s"' % b'", "'.join(dups) | msg = b'duplicate registration for names: "%s"' % b'", "'.join(dups) | ||||
raise error.ProgrammingError(msg) | raise error.ProgrammingError(msg) | ||||
self._table.update(registrarbase._table) | self._table.update(registrarbase._table) | ||||
def _parsefuncdecl(self, decl): | def _parsefuncdecl(self, decl): | ||||
"""Parse function declaration and return the name of function in it | """Parse function declaration and return the name of function in it""" | ||||
""" | |||||
i = decl.find(b'(') | i = decl.find(b'(') | ||||
if i >= 0: | if i >= 0: | ||||
return decl[:i] | return decl[:i] | ||||
else: | else: | ||||
return decl | return decl | ||||
def _getname(self, decl): | def _getname(self, decl): | ||||
"""Return the name of the registered function from decl | """Return the name of the registered function from decl | ||||
Derived class should override this, if it allows more | Derived class should override this, if it allows more | ||||
descriptive 'decl' string than just a name. | descriptive 'decl' string than just a name. | ||||
""" | """ | ||||
return decl | return decl | ||||
_docformat = None | _docformat = None | ||||
def _formatdoc(self, decl, doc): | def _formatdoc(self, decl, doc): | ||||
"""Return formatted document of the registered function for help | """Return formatted document of the registered function for help | ||||
'doc' is '__doc__.strip()' of the registered function. | 'doc' is '__doc__.strip()' of the registered function. | ||||
""" | """ | ||||
return self._docformat % (decl, doc) | return self._docformat % (decl, doc) | ||||
def _extrasetup(self, name, func): | def _extrasetup(self, name, func): | ||||
"""Execute extra setup for registered function, if needed | """Execute extra setup for registered function, if needed""" | ||||
""" | |||||
class command(_funcregistrarbase): | class command(_funcregistrarbase): | ||||
"""Decorator to register a command function to table | """Decorator to register a command function to table | ||||
This class receives a command table as its argument. The table should | This class receives a command table as its argument. The table should | ||||
be a dict. | be a dict. | ||||
_docformat = b"``%s``\n %s" | _docformat = b"``%s``\n %s" | ||||
def _extrasetup(self, name, func, callstatus=False, weight=1): | def _extrasetup(self, name, func, callstatus=False, weight=1): | ||||
func._callstatus = callstatus | func._callstatus = callstatus | ||||
func._weight = weight | func._weight = weight | ||||
class _templateregistrarbase(_funcregistrarbase): | class _templateregistrarbase(_funcregistrarbase): | ||||
"""Base of decorator to register functions as template specific one | """Base of decorator to register functions as template specific one""" | ||||
""" | |||||
_docformat = b":%s: %s" | _docformat = b":%s: %s" | ||||
class templatekeyword(_templateregistrarbase): | class templatekeyword(_templateregistrarbase): | ||||
"""Decorator to register template keyword | """Decorator to register template keyword | ||||
Usage:: | Usage:: |
assertions and lead to crashes.""" | assertions and lead to crashes.""" | ||||
obsoletes = obsolete.getrevs(repo, b'obsolete') | obsoletes = obsolete.getrevs(repo, b'obsolete') | ||||
internals = repo._phasecache.getrevset(repo, phases.localhiddenphases) | internals = repo._phasecache.getrevset(repo, phases.localhiddenphases) | ||||
internals = frozenset(internals) | internals = frozenset(internals) | ||||
return obsoletes | internals | return obsoletes | internals | ||||
def pinnedrevs(repo): | def pinnedrevs(repo): | ||||
"""revisions blocking hidden changesets from being filtered | """revisions blocking hidden changesets from being filtered""" | ||||
""" | |||||
cl = repo.changelog | cl = repo.changelog | ||||
pinned = set() | pinned = set() | ||||
pinned.update([par.rev() for par in repo[None].parents()]) | pinned.update([par.rev() for par in repo[None].parents()]) | ||||
pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()]) | pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()]) | ||||
tags = {} | tags = {} | ||||
tagsmod.readlocaltags(repo.ui, repo, tags, {}) | tagsmod.readlocaltags(repo.ui, repo, tags, {}) |
if maybewdir: | if maybewdir: | ||||
raise error.WdirUnsupported | raise error.WdirUnsupported | ||||
return None | return None | ||||
except TypeError: | except TypeError: | ||||
pass | pass | ||||
def lookup(self, id): | def lookup(self, id): | ||||
"""locate a node based on: | """locate a node based on: | ||||
- revision number or str(revision number) | - revision number or str(revision number) | ||||
- nodeid or subset of hex nodeid | - nodeid or subset of hex nodeid | ||||
""" | """ | ||||
n = self._match(id) | n = self._match(id) | ||||
if n is not None: | if n is not None: | ||||
return n | return n | ||||
n = self._partialmatch(id) | n = self._partialmatch(id) | ||||
if n: | if n: | ||||
return n | return n | ||||
if base == rev: | if base == rev: | ||||
return nullrev | return nullrev | ||||
elif self._generaldelta: | elif self._generaldelta: | ||||
return base | return base | ||||
else: | else: | ||||
return rev - 1 | return rev - 1 | ||||
def issnapshot(self, rev): | def issnapshot(self, rev): | ||||
"""tells whether rev is a snapshot | """tells whether rev is a snapshot""" | ||||
""" | |||||
if not self._sparserevlog: | if not self._sparserevlog: | ||||
return self.deltaparent(rev) == nullrev | return self.deltaparent(rev) == nullrev | ||||
elif util.safehasattr(self.index, b'issnapshot'): | elif util.safehasattr(self.index, b'issnapshot'): | ||||
# directly assign the method to cache the testing and access | # directly assign the method to cache the testing and access | ||||
self.issnapshot = self.index.issnapshot | self.issnapshot = self.index.issnapshot | ||||
return self.issnapshot(rev) | return self.issnapshot(rev) | ||||
if rev == nullrev: | if rev == nullrev: | ||||
return True | return True | ||||
# the temp file replace the real index when we exit the context | # the temp file replace the real index when we exit the context | ||||
# manager | # manager | ||||
tr.replace(self.indexfile, trindex * self._io.size) | tr.replace(self.indexfile, trindex * self._io.size) | ||||
nodemaputil.setup_persistent_nodemap(tr, self) | nodemaputil.setup_persistent_nodemap(tr, self) | ||||
self._chunkclear() | self._chunkclear() | ||||
def _nodeduplicatecallback(self, transaction, node): | def _nodeduplicatecallback(self, transaction, node): | ||||
"""called when trying to add a node already stored. | """called when trying to add a node already stored.""" | ||||
""" | |||||
def addrevision( | def addrevision( | ||||
self, | self, | ||||
text, | text, | ||||
transaction, | transaction, | ||||
link, | link, | ||||
p1, | p1, | ||||
p2, | p2, |
return # no need to register again | return # no need to register again | ||||
tr.addpending( | tr.addpending( | ||||
callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True) | callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True) | ||||
) | ) | ||||
tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog)) | tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog)) | ||||
class _NoTransaction(object): | class _NoTransaction(object): | ||||
"""transaction like object to update the nodemap outside a transaction | """transaction like object to update the nodemap outside a transaction""" | ||||
""" | |||||
def __init__(self): | def __init__(self): | ||||
self._postclose = {} | self._postclose = {} | ||||
def addpostclose(self, callback_id, callback_func): | def addpostclose(self, callback_id, callback_func): | ||||
self._postclose[callback_id] = callback_func | self._postclose[callback_id] = callback_func | ||||
def registertmp(self, *args, **kwargs): | def registertmp(self, *args, **kwargs): | ||||
notr = _NoTransaction() | notr = _NoTransaction() | ||||
_persist_nodemap(notr, revlog) | _persist_nodemap(notr, revlog) | ||||
for k in sorted(notr._postclose): | for k in sorted(notr._postclose): | ||||
notr._postclose[k](None) | notr._postclose[k](None) | ||||
def _persist_nodemap(tr, revlog, pending=False): | def _persist_nodemap(tr, revlog, pending=False): | ||||
"""Write nodemap data on disk for a given revlog | """Write nodemap data on disk for a given revlog""" | ||||
""" | |||||
if getattr(revlog, 'filteredrevs', ()): | if getattr(revlog, 'filteredrevs', ()): | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
"cannot persist nodemap of a filtered changelog" | "cannot persist nodemap of a filtered changelog" | ||||
) | ) | ||||
if revlog.nodemap_file is None: | if revlog.nodemap_file is None: | ||||
msg = "calling persist nodemap on a revlog without the feature enableb" | msg = "calling persist nodemap on a revlog without the feature enableb" | ||||
raise error.ProgrammingError(msg) | raise error.ProgrammingError(msg) | ||||
# | # | ||||
# The implementation focuses on simplicity, not on performance. A Rust | # The implementation focuses on simplicity, not on performance. A Rust | ||||
# implementation should provide an efficient version of the same binary | # implementation should provide an efficient version of the same binary | ||||
# persistence. This reference Python implementation is never meant to be | # persistence. This reference Python implementation is never meant to be | ||||
# extensively used in production. | # extensively used in production. | ||||
def persistent_data(index): | def persistent_data(index): | ||||
"""return the persistent binary form for a nodemap for a given index | """return the persistent binary form for a nodemap for a given index""" | ||||
""" | |||||
trie = _build_trie(index) | trie = _build_trie(index) | ||||
return _persist_trie(trie) | return _persist_trie(trie) | ||||
def update_persistent_data(index, root, max_idx, last_rev): | def update_persistent_data(index, root, max_idx, last_rev): | ||||
"""return the incremental update for persistent nodemap from a given index | """return the incremental update for persistent nodemap from a given index""" | ||||
""" | |||||
changed_block, trie = _update_trie(index, root, last_rev) | changed_block, trie = _update_trie(index, root, last_rev) | ||||
return ( | return ( | ||||
changed_block * S_BLOCK.size, | changed_block * S_BLOCK.size, | ||||
_persist_trie(trie, existing_idx=max_idx), | _persist_trie(trie, existing_idx=max_idx), | ||||
) | ) | ||||
S_BLOCK = struct.Struct(">" + ("l" * 16)) | S_BLOCK = struct.Struct(">" + ("l" * 16)) |
except error.WdirUnsupported: | except error.WdirUnsupported: | ||||
r = repo[r].p1().rev() | r = repo[r].p1().rev() | ||||
ps.add(r) | ps.add(r) | ||||
return subset & ps | return subset & ps | ||||
@predicate(b'author(string)', safe=True, weight=10) | @predicate(b'author(string)', safe=True, weight=10) | ||||
def author(repo, subset, x): | def author(repo, subset, x): | ||||
"""Alias for ``user(string)``. | """Alias for ``user(string)``.""" | ||||
""" | |||||
# i18n: "author" is a keyword | # i18n: "author" is a keyword | ||||
n = getstring(x, _(b"author requires a string")) | n = getstring(x, _(b"author requires a string")) | ||||
kind, pattern, matcher = _substringmatcher(n, casesensitive=False) | kind, pattern, matcher = _substringmatcher(n, casesensitive=False) | ||||
return subset.filter( | return subset.filter( | ||||
lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n) | lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n) | ||||
) | ) | ||||
cs.add(r) | cs.add(r) | ||||
if p2 != nullrev and p2 in parentset: | if p2 != nullrev and p2 in parentset: | ||||
cs.add(r) | cs.add(r) | ||||
return baseset(cs) | return baseset(cs) | ||||
@predicate(b'children(set)', safe=True) | @predicate(b'children(set)', safe=True) | ||||
def children(repo, subset, x): | def children(repo, subset, x): | ||||
"""Child changesets of changesets in set. | """Child changesets of changesets in set.""" | ||||
""" | |||||
s = getset(repo, fullreposet(repo), x) | s = getset(repo, fullreposet(repo), x) | ||||
cs = _children(repo, subset, s) | cs = _children(repo, subset, s) | ||||
return subset & cs | return subset & cs | ||||
@predicate(b'closed()', safe=True, weight=10) | @predicate(b'closed()', safe=True, weight=10) | ||||
def closed(repo, subset, x): | def closed(repo, subset, x): | ||||
"""Changeset is closed. | """Changeset is closed.""" | ||||
""" | |||||
# i18n: "closed" is a keyword | # i18n: "closed" is a keyword | ||||
getargs(x, 0, 0, _(b"closed takes no arguments")) | getargs(x, 0, 0, _(b"closed takes no arguments")) | ||||
return subset.filter( | return subset.filter( | ||||
lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>' | lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>' | ||||
) | ) | ||||
# for internal use | # for internal use | ||||
@predicate(b'_commonancestorheads(set)', safe=True) | @predicate(b'_commonancestorheads(set)', safe=True) | ||||
def _commonancestorheads(repo, subset, x): | def _commonancestorheads(repo, subset, x): | ||||
# This is an internal method for quickly calculating "heads(::x and | # This is an internal method for quickly calculating "heads(::x and | ||||
# ::y)" | # ::y)" | ||||
# These greatest common ancestors are the same ones that the consensus bid | # These greatest common ancestors are the same ones that the consensus bid | ||||
# merge will find. | # merge will find. | ||||
startrevs = getset(repo, fullreposet(repo), x, order=anyorder) | startrevs = getset(repo, fullreposet(repo), x, order=anyorder) | ||||
ancs = repo.changelog._commonancestorsheads(*list(startrevs)) | ancs = repo.changelog._commonancestorsheads(*list(startrevs)) | ||||
return subset & baseset(ancs) | return subset & baseset(ancs) | ||||
@predicate(b'commonancestors(set)', safe=True) | @predicate(b'commonancestors(set)', safe=True) | ||||
def commonancestors(repo, subset, x): | def commonancestors(repo, subset, x): | ||||
"""Changesets that are ancestors of every changeset in set. | """Changesets that are ancestors of every changeset in set.""" | ||||
""" | |||||
startrevs = getset(repo, fullreposet(repo), x, order=anyorder) | startrevs = getset(repo, fullreposet(repo), x, order=anyorder) | ||||
if not startrevs: | if not startrevs: | ||||
return baseset() | return baseset() | ||||
for r in startrevs: | for r in startrevs: | ||||
subset &= dagop.revancestors(repo, baseset([r])) | subset &= dagop.revancestors(repo, baseset([r])) | ||||
return subset | return subset | ||||
return subset.filter( | return subset.filter( | ||||
lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev) | lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev) | ||||
) | ) | ||||
@predicate(b'date(interval)', safe=True, weight=10) | @predicate(b'date(interval)', safe=True, weight=10) | ||||
def date(repo, subset, x): | def date(repo, subset, x): | ||||
"""Changesets within the interval, see :hg:`help dates`. | """Changesets within the interval, see :hg:`help dates`.""" | ||||
""" | |||||
# i18n: "date" is a keyword | # i18n: "date" is a keyword | ||||
ds = getstring(x, _(b"date requires a string")) | ds = getstring(x, _(b"date requires a string")) | ||||
dm = dateutil.matchdate(ds) | dm = dateutil.matchdate(ds) | ||||
return subset.filter( | return subset.filter( | ||||
lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds) | lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds) | ||||
) | ) | ||||
_(b'extdata takes at least 1 string argument'), | _(b'extdata takes at least 1 string argument'), | ||||
) | ) | ||||
data = scmutil.extdatasource(repo, source) | data = scmutil.extdatasource(repo, source) | ||||
return subset & baseset(data) | return subset & baseset(data) | ||||
@predicate(b'extinct()', safe=True) | @predicate(b'extinct()', safe=True) | ||||
def extinct(repo, subset, x): | def extinct(repo, subset, x): | ||||
"""Obsolete changesets with obsolete descendants only. (EXPERIMENTAL) | """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)""" | ||||
""" | |||||
# i18n: "extinct" is a keyword | # i18n: "extinct" is a keyword | ||||
getargs(x, 0, 0, _(b"extinct takes no arguments")) | getargs(x, 0, 0, _(b"extinct takes no arguments")) | ||||
extincts = obsmod.getrevs(repo, b'extinct') | extincts = obsmod.getrevs(repo, b'extinct') | ||||
return subset & extincts | return subset & extincts | ||||
@predicate(b'extra(label, [value])', safe=True) | @predicate(b'extra(label, [value])', safe=True) | ||||
def extra(repo, subset, x): | def extra(repo, subset, x): | ||||
# deletion in changelog | # deletion in changelog | ||||
continue | continue | ||||
return subset & s | return subset & s | ||||
@predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0) | @predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0) | ||||
def first(repo, subset, x, order): | def first(repo, subset, x, order): | ||||
"""An alias for limit(). | """An alias for limit().""" | ||||
""" | |||||
return limit(repo, subset, x, order) | return limit(repo, subset, x, order) | ||||
def _follow(repo, subset, x, name, followfirst=False): | def _follow(repo, subset, x, name, followfirst=False): | ||||
args = getargsdict(x, name, b'file startrev') | args = getargsdict(x, name, b'file startrev') | ||||
revs = None | revs = None | ||||
if b'startrev' in args: | if b'startrev' in args: | ||||
revs = getset(repo, fullreposet(repo), args[b'startrev']) | revs = getset(repo, fullreposet(repo), args[b'startrev']) | ||||
), | ), | ||||
iterasc=False, | iterasc=False, | ||||
) | ) | ||||
return subset & rs | return subset & rs | ||||
@predicate(b'all()', safe=True) | @predicate(b'all()', safe=True) | ||||
def getall(repo, subset, x): | def getall(repo, subset, x): | ||||
"""All changesets, the same as ``0:tip``. | """All changesets, the same as ``0:tip``.""" | ||||
""" | |||||
# i18n: "all" is a keyword | # i18n: "all" is a keyword | ||||
getargs(x, 0, 0, _(b"all takes no arguments")) | getargs(x, 0, 0, _(b"all takes no arguments")) | ||||
return subset & spanset(repo) # drop "null" if any | return subset & spanset(repo) # drop "null" if any | ||||
@predicate(b'grep(regex)', weight=10) | @predicate(b'grep(regex)', weight=10) | ||||
def grep(repo, subset, x): | def grep(repo, subset, x): | ||||
"""Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` | """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` | ||||
""" | """ | ||||
# i18n: "file" is a keyword | # i18n: "file" is a keyword | ||||
pat = getstring(x, _(b"file requires a pattern")) | pat = getstring(x, _(b"file requires a pattern")) | ||||
return _matchfiles(repo, subset, (b'string', b'p:' + pat)) | return _matchfiles(repo, subset, (b'string', b'p:' + pat)) | ||||
@predicate(b'head()', safe=True) | @predicate(b'head()', safe=True) | ||||
def head(repo, subset, x): | def head(repo, subset, x): | ||||
"""Changeset is a named branch head. | """Changeset is a named branch head.""" | ||||
""" | |||||
# i18n: "head" is a keyword | # i18n: "head" is a keyword | ||||
getargs(x, 0, 0, _(b"head takes no arguments")) | getargs(x, 0, 0, _(b"head takes no arguments")) | ||||
hs = set() | hs = set() | ||||
cl = repo.changelog | cl = repo.changelog | ||||
for ls in repo.branchmap().iterheads(): | for ls in repo.branchmap().iterheads(): | ||||
hs.update(cl.rev(h) for h in ls) | hs.update(cl.rev(h) for h in ls) | ||||
return subset & baseset(hs) | return subset & baseset(hs) | ||||
@predicate(b'heads(set)', safe=True, takeorder=True) | @predicate(b'heads(set)', safe=True, takeorder=True) | ||||
def heads(repo, subset, x, order): | def heads(repo, subset, x, order): | ||||
"""Members of set with no children in set. | """Members of set with no children in set.""" | ||||
""" | |||||
# argument set should never define order | # argument set should never define order | ||||
if order == defineorder: | if order == defineorder: | ||||
order = followorder | order = followorder | ||||
inputset = getset(repo, fullreposet(repo), x, order=order) | inputset = getset(repo, fullreposet(repo), x, order=order) | ||||
wdirparents = None | wdirparents = None | ||||
if node.wdirrev in inputset: | if node.wdirrev in inputset: | ||||
# a bit slower, but not common so good enough for now | # a bit slower, but not common so good enough for now | ||||
wdirparents = [p.rev() for p in repo[None].parents()] | wdirparents = [p.rev() for p in repo[None].parents()] | ||||
inputset = set(inputset) | inputset = set(inputset) | ||||
inputset.discard(node.wdirrev) | inputset.discard(node.wdirrev) | ||||
heads = repo.changelog.headrevs(inputset) | heads = repo.changelog.headrevs(inputset) | ||||
if wdirparents is not None: | if wdirparents is not None: | ||||
heads.difference_update(wdirparents) | heads.difference_update(wdirparents) | ||||
heads.add(node.wdirrev) | heads.add(node.wdirrev) | ||||
heads = baseset(heads) | heads = baseset(heads) | ||||
return subset & heads | return subset & heads | ||||
@predicate(b'hidden()', safe=True) | @predicate(b'hidden()', safe=True) | ||||
def hidden(repo, subset, x): | def hidden(repo, subset, x): | ||||
"""Hidden changesets. | """Hidden changesets.""" | ||||
""" | |||||
# i18n: "hidden" is a keyword | # i18n: "hidden" is a keyword | ||||
getargs(x, 0, 0, _(b"hidden takes no arguments")) | getargs(x, 0, 0, _(b"hidden takes no arguments")) | ||||
hiddenrevs = repoview.filterrevs(repo, b'visible') | hiddenrevs = repoview.filterrevs(repo, b'visible') | ||||
return subset & hiddenrevs | return subset & hiddenrevs | ||||
@predicate(b'keyword(string)', safe=True, weight=10) | @predicate(b'keyword(string)', safe=True, weight=10) | ||||
def keyword(repo, subset, x): | def keyword(repo, subset, x): | ||||
for t in c.files() + [c.user(), c.description()] | for t in c.files() + [c.user(), c.description()] | ||||
) | ) | ||||
return subset.filter(matches, condrepr=(b'<keyword %r>', kw)) | return subset.filter(matches, condrepr=(b'<keyword %r>', kw)) | ||||
@predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0) | @predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0) | ||||
def limit(repo, subset, x, order): | def limit(repo, subset, x, order): | ||||
"""First n members of set, defaulting to 1, starting from offset. | """First n members of set, defaulting to 1, starting from offset.""" | ||||
""" | |||||
args = getargsdict(x, b'limit', b'set n offset') | args = getargsdict(x, b'limit', b'set n offset') | ||||
if b'set' not in args: | if b'set' not in args: | ||||
# i18n: "limit" is a keyword | # i18n: "limit" is a keyword | ||||
raise error.ParseError(_(b"limit requires one to three arguments")) | raise error.ParseError(_(b"limit requires one to three arguments")) | ||||
# i18n: "limit" is a keyword | # i18n: "limit" is a keyword | ||||
lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1) | lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1) | ||||
if lim < 0: | if lim < 0: | ||||
raise error.ParseError(_(b"negative number to select")) | raise error.ParseError(_(b"negative number to select")) | ||||
# i18n: "limit" is a keyword | # i18n: "limit" is a keyword | ||||
ofs = getinteger( | ofs = getinteger( | ||||
args.get(b'offset'), _(b"limit expects a number"), default=0 | args.get(b'offset'), _(b"limit expects a number"), default=0 | ||||
) | ) | ||||
if ofs < 0: | if ofs < 0: | ||||
raise error.ParseError(_(b"negative offset")) | raise error.ParseError(_(b"negative offset")) | ||||
os = getset(repo, fullreposet(repo), args[b'set']) | os = getset(repo, fullreposet(repo), args[b'set']) | ||||
ls = os.slice(ofs, ofs + lim) | ls = os.slice(ofs, ofs + lim) | ||||
if order == followorder and lim > 1: | if order == followorder and lim > 1: | ||||
return subset & ls | return subset & ls | ||||
return ls & subset | return ls & subset | ||||
@predicate(b'last(set, [n])', safe=True, takeorder=True) | @predicate(b'last(set, [n])', safe=True, takeorder=True) | ||||
def last(repo, subset, x, order): | def last(repo, subset, x, order): | ||||
"""Last n members of set, defaulting to 1. | """Last n members of set, defaulting to 1.""" | ||||
""" | |||||
# i18n: "last" is a keyword | # i18n: "last" is a keyword | ||||
l = getargs(x, 1, 2, _(b"last requires one or two arguments")) | l = getargs(x, 1, 2, _(b"last requires one or two arguments")) | ||||
lim = 1 | lim = 1 | ||||
if len(l) == 2: | if len(l) == 2: | ||||
# i18n: "last" is a keyword | # i18n: "last" is a keyword | ||||
lim = getinteger(l[1], _(b"last expects a number")) | lim = getinteger(l[1], _(b"last expects a number")) | ||||
if lim < 0: | if lim < 0: | ||||
raise error.ParseError(_(b"negative number to select")) | raise error.ParseError(_(b"negative number to select")) | ||||
os = getset(repo, fullreposet(repo), l[0]) | os = getset(repo, fullreposet(repo), l[0]) | ||||
os.reverse() | os.reverse() | ||||
ls = os.slice(0, lim) | ls = os.slice(0, lim) | ||||
if order == followorder and lim > 1: | if order == followorder and lim > 1: | ||||
return subset & ls | return subset & ls | ||||
ls.reverse() | ls.reverse() | ||||
return ls & subset | return ls & subset | ||||
@predicate(b'max(set)', safe=True) | @predicate(b'max(set)', safe=True) | ||||
def maxrev(repo, subset, x): | def maxrev(repo, subset, x): | ||||
"""Changeset with highest revision number in set. | """Changeset with highest revision number in set.""" | ||||
""" | |||||
os = getset(repo, fullreposet(repo), x) | os = getset(repo, fullreposet(repo), x) | ||||
try: | try: | ||||
m = os.max() | m = os.max() | ||||
if m in subset: | if m in subset: | ||||
return baseset([m], datarepr=(b'<max %r, %r>', subset, os)) | return baseset([m], datarepr=(b'<max %r, %r>', subset, os)) | ||||
except ValueError: | except ValueError: | ||||
# os.max() throws a ValueError when the collection is empty. | # os.max() throws a ValueError when the collection is empty. | ||||
# Same as python's max(). | # Same as python's max(). | ||||
pass | pass | ||||
return baseset(datarepr=(b'<max %r, %r>', subset, os)) | return baseset(datarepr=(b'<max %r, %r>', subset, os)) | ||||
@predicate(b'merge()', safe=True) | @predicate(b'merge()', safe=True) | ||||
def merge(repo, subset, x): | def merge(repo, subset, x): | ||||
"""Changeset is a merge changeset. | """Changeset is a merge changeset.""" | ||||
""" | |||||
# i18n: "merge" is a keyword | # i18n: "merge" is a keyword | ||||
getargs(x, 0, 0, _(b"merge takes no arguments")) | getargs(x, 0, 0, _(b"merge takes no arguments")) | ||||
cl = repo.changelog | cl = repo.changelog | ||||
nullrev = node.nullrev | nullrev = node.nullrev | ||||
def ismerge(r): | def ismerge(r): | ||||
try: | try: | ||||
return cl.parentrevs(r)[1] != nullrev | return cl.parentrevs(r)[1] != nullrev | ||||
except error.WdirUnsupported: | except error.WdirUnsupported: | ||||
return bool(repo[r].p2()) | return bool(repo[r].p2()) | ||||
return subset.filter(ismerge, condrepr=b'<merge>') | return subset.filter(ismerge, condrepr=b'<merge>') | ||||
@predicate(b'branchpoint()', safe=True) | @predicate(b'branchpoint()', safe=True) | ||||
def branchpoint(repo, subset, x): | def branchpoint(repo, subset, x): | ||||
"""Changesets with more than one child. | """Changesets with more than one child.""" | ||||
""" | |||||
# i18n: "branchpoint" is a keyword | # i18n: "branchpoint" is a keyword | ||||
getargs(x, 0, 0, _(b"branchpoint takes no arguments")) | getargs(x, 0, 0, _(b"branchpoint takes no arguments")) | ||||
cl = repo.changelog | cl = repo.changelog | ||||
if not subset: | if not subset: | ||||
return baseset() | return baseset() | ||||
# XXX this should be 'parentset.min()' assuming 'parentset' is a smartset | # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset | ||||
# (and if it is not, it should be.) | # (and if it is not, it should be.) | ||||
baserev = min(subset) | baserev = min(subset) | ||||
parentscount = [0] * (len(repo) - baserev) | parentscount = [0] * (len(repo) - baserev) | ||||
for r in cl.revs(start=baserev + 1): | for r in cl.revs(start=baserev + 1): | ||||
for p in cl.parentrevs(r): | for p in cl.parentrevs(r): | ||||
if p >= baserev: | if p >= baserev: | ||||
parentscount[p - baserev] += 1 | parentscount[p - baserev] += 1 | ||||
return subset.filter( | return subset.filter( | ||||
lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>' | lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>' | ||||
) | ) | ||||
@predicate(b'min(set)', safe=True) | @predicate(b'min(set)', safe=True) | ||||
def minrev(repo, subset, x): | def minrev(repo, subset, x): | ||||
"""Changeset with lowest revision number in set. | """Changeset with lowest revision number in set.""" | ||||
""" | |||||
os = getset(repo, fullreposet(repo), x) | os = getset(repo, fullreposet(repo), x) | ||||
try: | try: | ||||
m = os.min() | m = os.min() | ||||
if m in subset: | if m in subset: | ||||
return baseset([m], datarepr=(b'<min %r, %r>', subset, os)) | return baseset([m], datarepr=(b'<min %r, %r>', subset, os)) | ||||
except ValueError: | except ValueError: | ||||
# os.min() throws a ValueError when the collection is empty. | # os.min() throws a ValueError when the collection is empty. | ||||
# Same as python's min(). | # Same as python's min(). | ||||
names.update(repo[n].rev() for n in ns.nodes(repo, name)) | names.update(repo[n].rev() for n in ns.nodes(repo, name)) | ||||
names -= {node.nullrev} | names -= {node.nullrev} | ||||
return subset & names | return subset & names | ||||
@predicate(b'id(string)', safe=True) | @predicate(b'id(string)', safe=True) | ||||
def node_(repo, subset, x): | def node_(repo, subset, x): | ||||
"""Revision non-ambiguously specified by the given hex string prefix. | """Revision non-ambiguously specified by the given hex string prefix.""" | ||||
""" | |||||
# i18n: "id" is a keyword | # i18n: "id" is a keyword | ||||
l = getargs(x, 1, 1, _(b"id requires one argument")) | l = getargs(x, 1, 1, _(b"id requires one argument")) | ||||
# i18n: "id" is a keyword | # i18n: "id" is a keyword | ||||
n = getstring(l[0], _(b"id requires a string")) | n = getstring(l[0], _(b"id requires a string")) | ||||
if len(n) == 40: | if len(n) == 40: | ||||
try: | try: | ||||
rn = repo.changelog.rev(node.bin(n)) | rn = repo.changelog.rev(node.bin(n)) | ||||
except error.WdirUnsupported: | except error.WdirUnsupported: | ||||
if rn is None: | if rn is None: | ||||
return baseset() | return baseset() | ||||
result = baseset([rn]) | result = baseset([rn]) | ||||
return result & subset | return result & subset | ||||
@predicate(b'none()', safe=True) | @predicate(b'none()', safe=True) | ||||
def none(repo, subset, x): | def none(repo, subset, x): | ||||
"""No changesets. | """No changesets.""" | ||||
""" | |||||
# i18n: "none" is a keyword | # i18n: "none" is a keyword | ||||
getargs(x, 0, 0, _(b"none takes no arguments")) | getargs(x, 0, 0, _(b"none takes no arguments")) | ||||
return baseset() | return baseset() | ||||
@predicate(b'obsolete()', safe=True) | @predicate(b'obsolete()', safe=True) | ||||
def obsolete(repo, subset, x): | def obsolete(repo, subset, x): | ||||
"""Mutable changeset with a newer version. (EXPERIMENTAL)""" | """Mutable changeset with a newer version. (EXPERIMENTAL)""" | ||||
repo.ui.popbuffer() | repo.ui.popbuffer() | ||||
cl = repo.changelog | cl = repo.changelog | ||||
o = {cl.rev(r) for r in outgoing.missing} | o = {cl.rev(r) for r in outgoing.missing} | ||||
return subset & o | return subset & o | ||||
@predicate(b'p1([set])', safe=True) | @predicate(b'p1([set])', safe=True) | ||||
def p1(repo, subset, x): | def p1(repo, subset, x): | ||||
"""First parent of changesets in set, or the working directory. | """First parent of changesets in set, or the working directory.""" | ||||
""" | |||||
if x is None: | if x is None: | ||||
p = repo[x].p1().rev() | p = repo[x].p1().rev() | ||||
if p >= 0: | if p >= 0: | ||||
return subset & baseset([p]) | return subset & baseset([p]) | ||||
return baseset() | return baseset() | ||||
ps = set() | ps = set() | ||||
cl = repo.changelog | cl = repo.changelog | ||||
for r in getset(repo, fullreposet(repo), x): | for r in getset(repo, fullreposet(repo), x): | ||||
try: | try: | ||||
ps.add(cl.parentrevs(r)[0]) | ps.add(cl.parentrevs(r)[0]) | ||||
except error.WdirUnsupported: | except error.WdirUnsupported: | ||||
ps.add(repo[r].p1().rev()) | ps.add(repo[r].p1().rev()) | ||||
ps -= {node.nullrev} | ps -= {node.nullrev} | ||||
# XXX we should turn this into a baseset instead of a set; smartset may do | # XXX we should turn this into a baseset instead of a set; smartset may do | ||||
# some optimizations based on the fact that this is a baseset. | # some optimizations based on the fact that this is a baseset. | ||||
return subset & ps | return subset & ps | ||||
@predicate(b'p2([set])', safe=True) | @predicate(b'p2([set])', safe=True) | ||||
def p2(repo, subset, x): | def p2(repo, subset, x): | ||||
"""Second parent of changesets in set, or the working directory. | """Second parent of changesets in set, or the working directory.""" | ||||
""" | |||||
if x is None: | if x is None: | ||||
ps = repo[x].parents() | ps = repo[x].parents() | ||||
try: | try: | ||||
p = ps[1].rev() | p = ps[1].rev() | ||||
if p >= 0: | if p >= 0: | ||||
return subset & baseset([p]) | return subset & baseset([p]) | ||||
return baseset() | return baseset() | ||||
except IndexError: | except IndexError: | ||||
return True | return True | ||||
return False | return False | ||||
return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs)) | return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs)) | ||||
@predicate(b'reverse(set)', safe=True, takeorder=True, weight=0) | @predicate(b'reverse(set)', safe=True, takeorder=True, weight=0) | ||||
def reverse(repo, subset, x, order): | def reverse(repo, subset, x, order): | ||||
"""Reverse order of set. | """Reverse order of set.""" | ||||
""" | |||||
l = getset(repo, subset, x, order) | l = getset(repo, subset, x, order) | ||||
if order == defineorder: | if order == defineorder: | ||||
l.reverse() | l.reverse() | ||||
return l | return l | ||||
@predicate(b'roots(set)', safe=True) | @predicate(b'roots(set)', safe=True) | ||||
def roots(repo, subset, x): | def roots(repo, subset, x): | ||||
"""Changesets in set with no parent changeset in set. | """Changesets in set with no parent changeset in set.""" | ||||
""" | |||||
s = getset(repo, fullreposet(repo), x) | s = getset(repo, fullreposet(repo), x) | ||||
parents = repo.changelog.parentrevs | parents = repo.changelog.parentrevs | ||||
def filter(r): | def filter(r): | ||||
for p in parents(r): | for p in parents(r): | ||||
if 0 <= p and p in s: | if 0 <= p and p in s: | ||||
return False | return False | ||||
return True | return True | ||||
@predicate(b'tagged', safe=True) | @predicate(b'tagged', safe=True) | ||||
def tagged(repo, subset, x): | def tagged(repo, subset, x): | ||||
return tag(repo, subset, x) | return tag(repo, subset, x) | ||||
@predicate(b'orphan()', safe=True) | @predicate(b'orphan()', safe=True) | ||||
def orphan(repo, subset, x): | def orphan(repo, subset, x): | ||||
"""Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL) | """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)""" | ||||
""" | |||||
# i18n: "orphan" is a keyword | # i18n: "orphan" is a keyword | ||||
getargs(x, 0, 0, _(b"orphan takes no arguments")) | getargs(x, 0, 0, _(b"orphan takes no arguments")) | ||||
orphan = obsmod.getrevs(repo, b'orphan') | orphan = obsmod.getrevs(repo, b'orphan') | ||||
return subset & orphan | return subset & orphan | ||||
@predicate(b'unstable()', safe=True) | @predicate(b'unstable()', safe=True) | ||||
def unstable(repo, subset, x): | def unstable(repo, subset, x): | ||||
"""Changesets with instabilities. (EXPERIMENTAL) | """Changesets with instabilities. (EXPERIMENTAL)""" | ||||
""" | |||||
# i18n: "unstable" is a keyword | # i18n: "unstable" is a keyword | ||||
getargs(x, 0, 0, b'unstable takes no arguments') | getargs(x, 0, 0, b'unstable takes no arguments') | ||||
_unstable = set() | _unstable = set() | ||||
_unstable.update(obsmod.getrevs(repo, b'orphan')) | _unstable.update(obsmod.getrevs(repo, b'orphan')) | ||||
_unstable.update(obsmod.getrevs(repo, b'phasedivergent')) | _unstable.update(obsmod.getrevs(repo, b'phasedivergent')) | ||||
_unstable.update(obsmod.getrevs(repo, b'contentdivergent')) | _unstable.update(obsmod.getrevs(repo, b'contentdivergent')) | ||||
return subset & baseset(_unstable) | return subset & baseset(_unstable) | ||||
if subset is None: | if subset is None: | ||||
subset = fullreposet(repo) | subset = fullreposet(repo) | ||||
return getset(repo, subset, tree, order) | return getset(repo, subset, tree, order) | ||||
return mfunc | return mfunc | ||||
def loadpredicate(ui, extname, registrarobj): | def loadpredicate(ui, extname, registrarobj): | ||||
"""Load revset predicates from specified registrarobj | """Load revset predicates from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
symbols[name] = func | symbols[name] = func | ||||
if func._safe: | if func._safe: | ||||
safesymbols.add(name) | safesymbols.add(name) | ||||
# load built-in predicates explicitly to setup safesymbols | # load built-in predicates explicitly to setup safesymbols | ||||
loadpredicate(None, None, predicate) | loadpredicate(None, None, predicate) | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = symbols.values() | i18nfunctions = symbols.values() |
    )
) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.iterbytestr(b'-/'))

def tokenize(program, lookup=None, syminitletters=None, symletters=None):
-    '''
+    """
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize(b"@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

-    '''
+    """
    if not isinstance(program, bytes):
        raise error.ProgrammingError(
            b'revset statement must be bytes, got %r' % program
        )
    program = pycompat.bytestr(program)
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
    for name, alias in sorted(pycompat.iteritems(aliases)):
        if alias.error and not alias.warned:
            warn(_(b'warning: %s\n') % (alias.error))
            alias.warned = True
    return tree

def foldconcat(tree):
-    """Fold elements to be concatenated by `##`
-    """
+    """Fold elements to be concatenated by `##`"""
    if not isinstance(tree, tuple) or tree[0] in (
        b'string',
        b'symbol',
        b'smartset',
    ):
        return tree
    if tree[0] == b'_concat':
        pending = [tree]

_formatlistfuncs = {
    b'l': _formatlistexp,
    b'p': _formatparamexp,
}

def formatspec(expr, *args):
-    '''
+    """
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = rev(int(arg)), no quoting

    >>> formatspec(b'branch(%b)', b)
    "branch('default')"
    >>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
    "sort((:), 'desc', 'user')"
    >>> formatspec(b'%ls', [b'a', b"'"])
    "_list('a\\x00\\'')"
-    '''
+    """
    parsed = _parseargs(expr, args)
    ret = []
    for t, arg in parsed:
        if t is None:
            ret.append(arg)
        elif t == b'baseset':
            if isinstance(arg, set):
                arg = sorted(arg)
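The doctests above pin down the escaping contract for list arguments: items are joined with NUL bytes, quotes are backslash-escaped, and the result is wrapped in `_list(...)`. A toy re-derivation of just that `%ls` case; `quote` and `formatlist` are made-up names, and the real implementation also escapes non-printable bytes:

def quote(s):
    # escape backslashes and single quotes, then single-quote the result
    return b"'%s'" % s.replace(b'\\', b'\\\\').replace(b"'", b"\\'")

def formatlist(items):
    # simplified mirror of the %ls behavior shown in the doctests
    return b"_list(%s)" % quote(b'\0'.join(items))

print(formatlist([b'a', b'b', b'c', b'd']))  # _list('a<NUL>b<NUL>c<NUL>d')
print(formatlist([b'a', b"'"]))              # _list('a<NUL>\'')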
parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize

@attr.s(slots=True, repr=False)
class status(object):
-    '''Struct with a list of files per status.
+    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
-    '''
+    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
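The `status` class leans on the `attrs` package: `attr.s(slots=True)` generates `__init__` and `__slots__`, and `attr.Factory(list)` gives each instance its own fresh list rather than a shared mutable default. A standalone sketch of the same pattern (`FileBuckets` is an illustrative name):

import attr

@attr.s(slots=True)
class FileBuckets(object):  # same attrs pattern as status above
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))

a, b = FileBuckets(), FileBuckets()
a.modified.append(b'foo')
assert b.modified == []  # Factory(list) avoids the shared-default pitfall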
def nochangesfound(ui, repo, excluded=None):
-    '''Report no changes for push/pull, excluded is None or a list of
-    nodes excluded from the push/pull.
-    '''
+    """Report no changes for push/pull, excluded is None or a list of
+    nodes excluded from the push/pull.
+    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:

    if msg:
        msg = b"%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.InputError(msg)
    ui.warn(_(b"warning: %s\n") % msg)

def checkportabilityalert(ui):
-    '''check if the user's config requests nothing, a warning, or abort for
-    non-portable filenames'''
+    """check if the user's config requests nothing, a warning, or abort for
+    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
    for rev in revs:
        s.update(b'%d;' % rev)
    key = s.digest()
    cl._filteredrevs_hashcache[maxrev] = key
    return key

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
-    '''yield every hg repository under path, always recursively.
-    The recurse flag will only control recursion into repo working dirs'''
+    """yield every hg repository under path, always recursively.
+    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

def walkchangerevs(repo, revs, makefilematcher, prepare):
-    '''Iterate over files and the revs in a "windowed" way.
+    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
-    function on each context in the window in forward order.'''
+    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
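The windowing idea is easy to see in isolation: window sizes double up to a cap, each window is gathered walking forward, then emitted in the desired (usually reverse) order. A self-contained sketch under those assumptions, with illustrative names:

def increasingwindows(windowsize=8, sizelimit=512):
    # same doubling generator as in the diff above
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

def windowed(revs):
    it = iter(revs)
    for size in increasingwindows():
        window = [r for r, _ in zip(it, range(size))]
        if not window:
            return
        # gather forward, then display the window backwards
        yield list(reversed(window))

print(list(windowed(range(20))))  # a window of 8, then one of 16, reversed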
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))

def anypats(pats, opts):
-    '''Checks if any patterns, including --include and --exclude were given.
+    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
-    '''
+    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))

def expandpats(pats):
-    '''Expand bare globs when running on windows.
-    On posix we assume it already has already been done by sh.'''
+    """Expand bare globs when running on windows.
+    On posix we assume it has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
-    '''Return a matcher and the patterns that were used.
+    """Return a matcher and the patterns that were used.

    The matcher will warn about bad matches, unless an alternate badfn callback
-    is provided.'''
+    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
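`expandpats` exists because `cmd.exe` hands hg its glob patterns unexpanded, while POSIX shells expand them first. A quick illustration of the fallback behavior using only the stdlib; the colon check is a crude stand-in for `_patsplit`, and the patterns are hypothetical:

import glob

def expand(pats, expandglobs=True):
    # simplified expandpats: expand bare patterns, keep kind-prefixed
    # ones (e.g. 're:...') untouched
    if not expandglobs:
        return list(pats)
    ret = []
    for pat in pats:
        if ':' in pat.split('/')[0]:  # crude stand-in for _patsplit
            ret.append(pat)
            continue
        ret.extend(glob.glob(pat) or [pat])
    return ret

print(expand(['*.py', 're:.*\\.c$']))  # globs expanded, 're:' kept verbatim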
    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))

def backuppath(ui, repo, filepath):
-    '''customize where working copy backup files (.orig files) are created
+    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
-    '''
+    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
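The effect of `ui.origbackuppath` is a path rewrite: without it, backups land next to the file as `<file>.orig`; with it, the repo-relative path is mirrored under the configured directory. A sketch of that decision as pure string manipulation; the helper name and paths are hypothetical:

import os

def orig_backup_path(repo_root, filepath, origbackuppath=None):
    # mimics backuppath(): the default is "<file>.orig" in place
    if origbackuppath is None:
        return os.path.join(repo_root, filepath + '.orig')
    # otherwise mirror the repo-relative path under the backup dir
    return os.path.join(repo_root, origbackuppath, filepath)

print(orig_backup_path('/repo', 'src/a.c'))             # /repo/src/a.c.orig
print(orig_backup_path('/repo', 'src/a.c', '.origbk'))  # /repo/.origbk/src/a.c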
    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
-    '''Assert that files have somehow been operated upon. files are relative to
-    the repo root.'''
+    """Assert that files have somehow been operated upon. files are relative to
+    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
-    '''Walk dirstate with matcher, looking for files that addremove would care
-    about.
+    """Walk dirstate with matcher, looking for files that addremove would care
+    about.

    This is different from dirstate.status because it doesn't care about
-    whether files are modified or clean.'''
+    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,

            )
            % (uipathfn(old), uipathfn(new), score * 100)
        )
        renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
-    '''Marks the files in unknown as added, the files in deleted as removed,
-    and the files in renames as copied.'''
+    """Marks the files in unknown as added, the files in deleted as removed,
+    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)
        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
-        '''looks up all renames for a file (up to endrev) the first
+        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
-        Returns rename info for fn at changerev rev.'''
+        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:

    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()

def filterrequirements(requirements):
-    """ filters the requirements into two sets:
+    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
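`filterrequirements` implements the share-safe split: when the share-safe requirement is present, working-copy-specific requirements stay in `.hg/requires` and the rest move to `.hg/store/requires`; otherwise everything stays in `.hg/requires`. A toy version of that split, where the requirement names and the `WC_ONLY` set are placeholders, not the real lists:

SHARESAFE = 'share-safe'
WC_ONLY = {'shared', 'relshared', SHARESAFE}  # illustrative only

def filter_requirements(requirements):
    # returns (wcreq, storereq), mirroring the shape described above
    if SHARESAFE not in requirements:
        return set(requirements), None
    wc = {r for r in requirements if r in WC_ONLY}
    store = set(requirements) - wc
    return wc, store

print(filter_requirements({'revlogv1', SHARESAFE, 'shared'}))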
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))

def gdinitconfig(ui):
-    """helper function to know if a repo should be created as general delta
-    """
+    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )

def gddeltaconfig(ui):
-    """helper function to know if incoming delta should be optimised
-    """
+    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    local,
    remote,
    initialsamplesize=100,
    fullsamplesize=200,
    abortwhenunrelated=True,
    ancestorsof=None,
    samplegrowth=1.05,
):
-    '''Return a tuple (common, anyincoming, remoteheads) used to identify
-    missing nodes from or in remote.
-    '''
+    """Return a tuple (common, anyincoming, remoteheads) used to identify
+    missing nodes from or in remote.
+    """
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:

        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            fheads = e.callcommand(b'heads', {})
            fknown = e.callcommand(
-                b'known', {b'nodes': [clnode(r) for r in sample],}
+                b'known',
+                {
+                    b'nodes': [clnode(r) for r in sample],
+                },
            )

        srvheadhashes, yesno = fheads.result(), fknown.result()

        if cl.tip() == nullid:
            if srvheadhashes != [nullid]:
                return [nullid], True, srvheadhashes
            return [nullid], False, []

            % (roundtrips, stats['undecided'], len(sample))
        )

        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand(
-                b'known', {b'nodes': [clnode(r) for r in sample],}
+                b'known',
+                {
+                    b'nodes': [clnode(r) for r in sample],
+                },
            ).result()

        full = True

        disco.addinfo(zip(sample, yesno))

    result = disco.commonheads()
    elapsed = util.timer() - start
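The loop above is the classic set-discovery round trip: keep a set of undecided nodes, send a sample to the remote's `known` command, and fold the yes/no answers back in until nothing is undecided. A toy round-based version against an in-memory "remote"; it ignores graph structure, so it needs far more rounds than the real sampler:

import random

def discover(local_nodes, remote_has, samplesize=3):
    # toy discovery: classify every local node as known/unknown remotely
    undecided = set(local_nodes)
    common, missing, roundtrips = set(), set(), 0
    while undecided:
        sample = random.sample(sorted(undecided),
                               min(samplesize, len(undecided)))
        yesno = [n in remote_has for n in sample]  # one "known" round trip
        roundtrips += 1
        for n, yes in zip(sample, yesno):
            (common if yes else missing).add(n)
        undecided -= set(sample)
    return common, missing, roundtrips

print(discover(range(10), remote_has={0, 1, 2, 3}))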
def _restoreactivebookmark(repo, mark):
    if mark:
        bookmarks.activate(repo, mark)

def _aborttransaction(repo, tr):
-    '''Abort current transaction for shelve/unshelve, but keep dirstate
-    '''
+    """Abort current transaction for shelve/unshelve, but keep dirstate"""
    dirstatebackupname = b'dirstate.shelve'
    repo.dirstate.savebackup(tr, dirstatebackupname)
    tr.abort()
    repo.dirstate.restorebackup(None, dirstatebackupname)

def getshelvename(repo, parent, opts):
    """Decide on the name this shelve is going to have"""
from .i18n import _
from . import (
    mdiff,
    pycompat,
)

def _findexactmatches(repo, added, removed):
-    '''find renamed files that have no changes
+    """find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
-    '''
+    """
    # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
    # We use hash() to discard fctx.data() from memory.
    hashes = {}
    progress = repo.ui.makeprogress(
        _(b'searching for exact renames'),
        total=(len(added) + len(removed)),
        unit=_(b'files'),
    )

    return equal * 2.0 / lengths

def score(fctx1, fctx2):
    return _score(fctx1, _ctxdata(fctx2))

def _findsimilarmatches(repo, added, removed, threshold):
-    '''find potentially renamed files based on similar file content
+    """find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
-    '''
+    """
    copies = {}
    progress = repo.ui.makeprogress(
        _(b'searching for similar files'), unit=_(b'files'), total=len(removed)
    )
    for r in removed:
        progress.increment()
        data = None
        for a in added:
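Exact-rename detection is essentially a hash join: bucket removed files by a hash of their content, then probe the table with each added file. A standalone sketch over plain byte strings instead of filectxs (names and data are illustrative):

def find_exact_matches(added, removed):
    # added/removed: dicts of {path: content}; yields (old, new) pairs
    buckets = {}
    for path, data in removed.items():
        buckets.setdefault(hash(data), []).append((path, data))
    for path, data in added.items():
        for oldpath, olddata in buckets.get(hash(data), ()):
            if olddata == data:  # hash() can collide; confirm the bytes
                yield oldpath, path

removed = {'a.txt': b'hello'}
added = {'b.txt': b'hello', 'c.txt': b'world'}
print(list(find_exact_matches(added, removed)))  # [('a.txt', 'b.txt')]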
        sb = min(ra[1], rb[1])
        if sa < sb:
            return sa, sb
        else:
            return None

    def compare_range(a, astart, aend, b, bstart, bend):
-        """Compare a[astart:aend] == b[bstart:bend], without slicing.
-        """
+        """Compare a[astart:aend] == b[bstart:bend], without slicing."""
        if (aend - astart) != (bend - bstart):
            return False
        for ia, ib in zip(
            pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend)
        ):
            if a[ia] != b[ib]:
                return False
        else:
        name_base=None,
        start_marker=b'<<<<<<<',
        mid_marker=b'=======',
        end_marker=b'>>>>>>>',
        base_marker=None,
        localorother=None,
        minimize=False,
    ):
-        """Return merge in cvs-like form.
-        """
+        """Return merge in cvs-like form."""
        self.conflicts = False
        newline = b'\n'
        if len(self.a) > 0:
            if self.a[0].endswith(b'\r\n'):
                newline = b'\r\n'
            elif self.a[0].endswith(b'\r'):
                newline = b'\r'
        if name_a and start_marker:
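The marker parameters above map directly onto the conflict regions in the output. A minimal generator showing the shape they produce, simplified to a single conflict with no base markers and no newline detection:

def merge_markers(a_lines, b_lines, name_a=b'local', name_b=b'other'):
    # emit one conflict region in cvs-like form, mirroring the
    # start/mid/end markers configured above
    yield b'<<<<<<< ' + name_a + b'\n'
    for line in a_lines:
        yield line
    yield b'=======\n'
    for line in b_lines:
        yield line
    yield b'>>>>>>> ' + name_b + b'\n'

print(b''.join(merge_markers([b'left\n'], [b'right\n'])).decode())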
        # closed prematurely.
        _forwardoutput(self._ui, self._side)
        return r

    def readline(self):
        return self._call(b'readline')

    def _call(self, methname, data=None):
-        """call <methname> on "main", forward output of "side" while blocking
-        """
+        """call <methname> on "main", forward output of "side" while blocking"""
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return b''
        while True:
            mainready, sideready = self._wait()
            if sideready:
                _forwardoutput(self._ui, self._side)
        s[b'verifymode'] = ssl.CERT_NONE

    assert s[b'verifymode'] is not None

    return s

def commonssloptions(minimumprotocol):
-    """Return SSLContext options common to servers and clients.
-    """
+    """Return SSLContext options common to servers and clients."""
    if minimumprotocol not in configprotocols:
        raise ValueError(b'protocol value not supported: %s' % minimumprotocol)

    # SSLv2 and SSLv3 are broken. We ban them outright.
    options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3

    if minimumprotocol == b'tls1.0':
        # Defaults above are to use TLS 1.0+

    for frag in remainder:
        pats.append(stringutil.reescape(frag))

    pat = re.compile(br'\A' + br'\.'.join(pats) + br'\Z', re.IGNORECASE)
    return pat.match(hostname) is not None

def _verifycert(cert, hostname):
-    '''Verify that cert (in socket.getpeercert() format) matches hostname.
-    CRLs is not handled.
+    """Verify that cert (in socket.getpeercert() format) matches hostname.
+    CRLs are not handled.

    Returns error message if any problems are found and None on success.
-    '''
+    """
    if not cert:
        return _(b'no certificate received')

    dnsnames = []
    san = cert.get('subjectAltName', [])
    for key, value in san:
        if key == 'DNS':
            try:
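The fragment loop above builds a case-insensitive, anchored regex out of the certificate name one dot-separated label at a time, so a `*` wildcard is constrained to a single label. A condensed sketch of that idea, simplified relative to the full RFC 6125 rules:

import re

def dnsname_match(cert_name, hostname):
    # translate 'a.b.c' into anchored regex fragments; a leading '*'
    # matches exactly one label (no dots)
    pats = []
    for i, frag in enumerate(cert_name.split('.')):
        if frag == '*' and i == 0:
            pats.append('[^.]+')
        else:
            pats.append(re.escape(frag))
    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname) is not None

print(dnsname_match('*.example.com', 'www.example.com'))    # True
print(dnsname_match('*.example.com', 'a.b.example.com'))    # False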
    The class object can write all the data to a file in .hg/ directory and
    can populate the object data reading that file.

    Uses cbor to serialize and deserialize data while writing and reading from
    disk.
    """

    def __init__(self, repo, fname):
-        """ repo is the repo object
+        """repo is the repo object

        fname is the file name in which data should be stored in .hg directory
        """
        self._repo = repo
        self.fname = fname

    def read(self):
        # type: () -> Dict[bytes, Any]
        """read the existing state file and return a dict of data stored"""

    def exists(self):
        """check whether the state file exists or not"""
        return self._repo.vfs.exists(self.fname)
class _statecheck(object):
    """a utility class that deals with multistep operations like graft,
    histedit, bisect, update etc and check whether such commands
    are in an unfinished condition or not and return appropriate message
    and hint.
    It also has the ability to register and determine the states of any new
    multistep operation or multistep command extension.
    """

    def __init__(
        self,
        opname,
        fname,
        clearable,
        allowcommit,

            return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
                self._opname,
                self._opname,
            )
        else:
            return _(
                b"use 'hg %s --continue', 'hg %s --abort', "
                b"or 'hg %s --stop'"
-            ) % (self._opname, self._opname, self._opname,)
+            ) % (
+                self._opname,
+                self._opname,
+                self._opname,
+            )
        return self._cmdhint

    def msg(self):
        """returns the status message corresponding to the command"""
        if not self._cmdmsg:
            return _(b'%s in progress') % (self._opname)
        return self._cmdmsg
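Extensions reach this machinery through a module-level registration helper, exposed in recent Mercurial as `addunfinished`. A hedged sketch of registering a hypothetical multistep command; treat the keyword names as assumptions recalled from that API, not something this diff establishes:

# Registering a made-up command's unfinished state so that
# `hg status` can report "mycmd in progress".
from mercurial import state as statemod

statemod.addunfinished(
    b'mycmd',                  # opname (hypothetical command)
    fname=b'mycmdstate',       # state file kept under .hg/
    clearable=True,            # an --abort can clear the state
    statushint=b'To continue: hg mycmd --continue',
)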
            sites.append(
                CodeSite.get(siteparts[0], int(siteparts[1]), siteparts[2])
            )
        state.samples.append(Sample(sites, time))

def reset(frequency=None):
-    '''Clear out the state of the profiler. Do not call while the
+    """Clear out the state of the profiler. Do not call while the
    profiler is running.

    The optional frequency argument specifies the number of samples to
-    collect per second.'''
+    collect per second."""
    assert state.profile_level == 0, b"Can't reset() while statprof is running"
    CodeSite.cache.clear()
    state.reset(frequency)

@contextmanager
def profile():
    start()

    if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
        fp.write(b'---\n')
        fp.write(b'Sample count: %d\n' % len(data.samples))
        fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)

def display_by_line(data, fp):
-    '''Print the profiler data with each sample line represented
-    as one row in a table. Sorted by self-time per line.'''
+    """Print the profiler data with each sample line represented
+    as one row in a table. Sorted by self-time per line."""
    stats = SiteStats.buildstats(data.samples)
    stats.sort(reverse=True, key=lambda x: x.selfseconds())

    fp.write(
        b'%5.5s %10.10s %7.7s %-8.8s\n'
        % (b'% ', b'cumulative', b'self', b'')
    )
    fp.write(

            stat.totalseconds(),
            stat.selfseconds(),
            sitelabel,
        )
    )

def display_by_method(data, fp):
-    '''Print the profiler data with each sample function represented
+    """Print the profiler data with each sample function represented
    as one row in a table. Important lines within that function are
-    output as nested rows. Sorted by self-time per line.'''
+    output as nested rows. Sorted by self-time per line."""
    fp.write(
        b'%5.5s %10.10s %7.7s %-8.8s\n'
        % (b'% ', b'cumulative', b'self', b'')
    )
    fp.write(
        b'%5.5s %9.9s %8.8s %-8.8s\n'
        % (b"time", b"seconds", b"seconds", b"name")
    )

    os.system(b"perl ~/flamegraph.pl %s > %s" % (path, outputfile))
    fp.write(b'Written to %s\n' % outputfile)

_pathcache = {}

def simplifypath(path):
-    '''Attempt to make the path to a Python module easier to read by
+    """Attempt to make the path to a Python module easier to read by
    removing whatever part of the Python search path it was found
-    on.'''
+    on."""
    if path in _pathcache:
        return _pathcache[path]
    hgpath = encoding.__file__.rsplit(os.sep, 2)[0]
    for p in [hgpath] + sys.path:
        prefix = p + os.sep
        if path.startswith(prefix):
            path = path[len(prefix) :]
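`simplifypath` is longest-known-prefix stripping over the search path, memoized in `_pathcache`. The core transformation in isolation, with an explicit `search` parameter added here so the example is deterministic:

import os
import sys

_pathcache = {}

def simplify_path(path, search=None):
    # strip the first matching search-path prefix so profiler output
    # shows 'mercurial/revset.py' instead of an absolute install path
    if path in _pathcache:
        return _pathcache[path]
    result = path
    for p in (search if search is not None else sys.path):
        prefix = p + os.sep
        if result.startswith(prefix):
            result = result[len(prefix):]
            break
    _pathcache[path] = result
    return result

print(simplify_path('/usr/lib/python3/dist-packages/mercurial/revset.py',
                    search=['/usr/lib/python3/dist-packages']))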
        return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
    raise error.ProgrammingError(b"cannot decode path %s" % path)

# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
-    '''
+    """
    >>> _encodedir(b'data/foo.i')
    'data/foo.i'
    >>> _encodedir(b'data/foo.i/bla.i')
    'data/foo.i.hg/bla.i'
    >>> _encodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i.hg.hg/bla.i'
    >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
    'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
-    '''
+    """
    return (
        path.replace(b".hg/", b".hg.hg/")
        .replace(b".i/", b".i.hg/")
        .replace(b".d/", b".d.hg/")
    )

encodedir = getattr(parsers, 'encodedir', _encodedir)

def decodedir(path):
-    '''
+    """
    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
-    '''
+    """
    if b".hg/" not in path:
        return path
    return (
        path.replace(b".d.hg/", b".d/")
        .replace(b".i.hg/", b".i/")
        .replace(b".hg.hg/", b".hg/")
    )
def _reserved():
-    ''' characters that are problematic for filesystems
+    """characters that are problematic for filesystems

    * ascii escapes (0..31)
    * ascii hi (126..255)
    * windows specials

    these characters will be escaped by encodefunctions
-    '''
+    """
    winreserved = [ord(x) for x in u'\\:*?"<>|']
    for x in range(32):
        yield x
    for x in range(126, 256):
        yield x
    for x in winreserved:
        yield x

def _buildencodefun():
-    '''
+    """
    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
-    '''
+    """
    e = b'_'
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    cmap = {x: x for x in asciistr}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
        lambda s: b''.join(list(decode(s))),
    )

_encodefname, _decodefname = _buildencodefun()

def encodefilename(s):
-    '''
+    """
    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
-    '''
+    """
    return _encodefname(encodedir(s))

def decodefilename(s):
-    '''
+    """
    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
-    '''
+    """
    return decodedir(_decodefname(s))
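The `_x`/`~xx` scheme above is a per-byte substitution map. A compact re-derivation of the encoder matching the doctests: capitals become `_` plus the lowercase letter, `_` doubles itself, and reserved bytes (control characters, high bytes from 126 up, Windows specials) become `~xx`:

def encode_byte(c):
    # mirrors the cmap built in _buildencodefun above
    if c == '_':
        return '__'
    if 'A' <= c <= 'Z':
        return '_' + c.lower()
    o = ord(c)
    if o < 32 or o > 125 or c in '\\:*?"<>|':
        return '~%02x' % o
    return c

def encode_name(s):
    return ''.join(encode_byte(c) for c in s)

print(encode_name('HELLO'))         # _h_e_l_l_o
print(encode_name('hello:world?'))  # hello~3aworld~3f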
def _buildlowerencodefun():
-    '''
+    """
    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
-    '''
+    """
    xchr = pycompat.bytechr
    cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(x)] = xchr(x).lower()

    def lowerencode(s):
        return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])

    return lowerencode

lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()

# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = (b'aux', b'con', b'prn', b'nul')  # length 3
_winres4 = (b'com', b'lpt')  # length 4 (with trailing 1..9)

def _auxencode(path, dotencode):
-    '''
+    """
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split(b'/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split(b'/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode([b'foo. '], True)
    ['foo.~20']
    >>> _auxencode([b' .foo'], True)
    ['~20.foo']
-    '''
+    """
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in b'. ':
            n = b"~%02x" % ord(n[0:1]) + n[1:]
            path[i] = n
        else:
            l = n.find(b'.')
            spaceleft = _maxstorepathlen - len(res)
            if spaceleft > 0:
                filler = basename[:spaceleft]
                res = b'dh/' + dirs + filler + digest + ext
    return res

def _hybridencode(path, dotencode):
-    '''encodes path with a length limit
+    """encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).

    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
-    '''
+    """
    path = encodedir(path)
    ef = _encodefname(path).split(b'/')
    res = b'/'.join(_auxencode(ef, dotencode))
    if len(res) > _maxstorepathlen:
        res = _hashencode(path, dotencode)
    return res
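The length cutoff is the whole trick: try the reversible encoding first, and only if the result exceeds `_maxstorepathlen` fall back to the lossy hashed form, which keeps a digest plus as much of the original name as fits. A schematic version; the limit, digest choice, and helper names are illustrative, and the real fallback also preserves directories, a filler, and the extension:

import hashlib

MAXSTOREPATHLEN = 120  # stand-in for _maxstorepathlen

def toy_hash_encode(path):
    digest = hashlib.sha1(path.encode()).hexdigest()
    return 'dh/' + digest  # real code keeps dirs, filler and extension too

def hybrid_encode(path, encode, hash_encode):
    # encode() is the reversible scheme; hash_encode() the 'dh/' fallback
    res = encode(path)
    if len(res) > MAXSTOREPATHLEN:
        res = hash_encode(path)
    return res

print(hybrid_encode('data/' + 'x' * 200 + '.i', lambda p: p, toy_hash_encode))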
    def datafiles(self, matcher=None):
        return self._walk(b'data', True) + self._walk(b'meta', True)

    def topfiles(self):
        # yield manifest before changelog
        return reversed(self._walk(b'', False))

    def walk(self, matcher=None):
-        '''yields (unencoded, encoded, size)
-
-        if a matcher is passed, storage files of only those tracked paths
-        are passed with matches the matcher
-        '''
+        """yields (unencoded, encoded, size)
+
+        if a matcher is passed, only storage files of tracked paths
+        that match the matcher are yielded
+        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        return _data

    def __init__(self, vfs):
        self.vfs = vfs
        self.entries = None
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def ensureloaded(self, warn=None):
-        '''read the fncache file if not already read.
-
-        If the file on disk is corrupted, raise. If warn is provided,
-        warn and keep going instead.'''
+        """read the fncache file if not already read.
+
+        If the file on disk is corrupted, raise. If warn is provided,
+        warn and keep going instead."""
        if self.entries is None:
            self._load(warn)

    def _load(self, warn=None):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs(b'fncache', mode=b'rb')
b'force', | b'force', | ||||
None, | None, | ||||
_( | _( | ||||
b'force removal of changesets, discard ' | b'force removal of changesets, discard ' | ||||
b'uncommitted changes (no backup)' | b'uncommitted changes (no backup)' | ||||
), | ), | ||||
), | ), | ||||
(b'', b'no-backup', None, _(b'do not save backup bundle')), | (b'', b'no-backup', None, _(b'do not save backup bundle')), | ||||
(b'', b'nobackup', None, _(b'do not save backup bundle (DEPRECATED)'),), | ( | ||||
| b'', | ||||
| b'nobackup', | ||||
| None, | ||||
| _(b'do not save backup bundle (DEPRECATED)'), | ||||
| ), | ||||
(b'n', b'', None, _(b'ignored (DEPRECATED)')), | (b'n', b'', None, _(b'ignored (DEPRECATED)')), | ||||
( | ( | ||||
b'k', | b'k', | ||||
b'keep', | b'keep', | ||||
None, | None, | ||||
_(b"do not modify working directory during strip"), | _(b"do not modify working directory during strip"), | ||||
), | ), | ||||
( | ( |
hg = None | hg = None | ||||
reporelpath = subrepoutil.reporelpath | reporelpath = subrepoutil.reporelpath | ||||
subrelpath = subrepoutil.subrelpath | subrelpath = subrepoutil.subrelpath | ||||
_abssource = subrepoutil._abssource | _abssource = subrepoutil._abssource | ||||
propertycache = util.propertycache | propertycache = util.propertycache | ||||
def _expandedabspath(path): | def _expandedabspath(path): | ||||
''' | """ | ||||
take a path or URL; if it is a path, expand it and return an absolute path | take a path or URL; if it is a path, expand it and return an absolute path | ||||
''' | """ | ||||
expandedpath = util.urllocalpath(util.expandpath(path)) | expandedpath = util.urllocalpath(util.expandpath(path)) | ||||
u = util.url(expandedpath) | u = util.url(expandedpath) | ||||
if not u.scheme: | if not u.scheme: | ||||
path = util.normpath(os.path.abspath(u.path)) | path = util.normpath(os.path.abspath(u.path)) | ||||
return path | return path | ||||
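A rough standard-library stand-in for the expand-then-absolutize behavior above. util.expandpath and util.urllocalpath are approximated here with os.path calls and a naive scheme check, so this is a sketch, not Mercurial's logic:

    import os

    def expanded_abspath(path):
        expanded = os.path.expandvars(os.path.expanduser(path))
        if '://' in expanded:        # crude "has a scheme" test
            return expanded          # leave real URLs untouched
        return os.path.normpath(os.path.abspath(expanded))

    print(expanded_abspath('~/src/hg'))               # absolute local path
    print(expanded_abspath('https://example.com/r'))  # returned unchanged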
def _getstorehashcachename(remotepath): | def _getstorehashcachename(remotepath): | ||||
This returns None, otherwise. | This returns None, otherwise. | ||||
""" | """ | ||||
if self.dirty(ignoreupdate=ignoreupdate, missing=missing): | if self.dirty(ignoreupdate=ignoreupdate, missing=missing): | ||||
return _(b'uncommitted changes in subrepository "%s"') % subrelpath( | return _(b'uncommitted changes in subrepository "%s"') % subrelpath( | ||||
self | self | ||||
) | ) | ||||
def bailifchanged(self, ignoreupdate=False, hint=None): | def bailifchanged(self, ignoreupdate=False, hint=None): | ||||
"""raise Abort if subrepository is ``dirty()`` | """raise Abort if subrepository is ``dirty()``""" | ||||
""" | |||||
dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, missing=True) | dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, missing=True) | ||||
if dirtyreason: | if dirtyreason: | ||||
raise error.Abort(dirtyreason, hint=hint) | raise error.Abort(dirtyreason, hint=hint) | ||||
def basestate(self): | def basestate(self): | ||||
"""current working directory base state, disregarding .hgsubstate | """current working directory base state, disregarding .hgsubstate | ||||
state and working directory modifications""" | state and working directory modifications""" | ||||
raise NotImplementedError | raise NotImplementedError | ||||
def checknested(self, path): | def checknested(self, path): | ||||
"""check if path is a subrepository within this repository""" | """check if path is a subrepository within this repository""" | ||||
return False | return False | ||||
def commit(self, text, user, date): | def commit(self, text, user, date): | ||||
"""commit the current changes to the subrepo with the given | """commit the current changes to the subrepo with the given | ||||
log message. Use given user and date if possible. Return the | log message. Use given user and date if possible. Return the | ||||
new state of the subrepo. | new state of the subrepo. | ||||
""" | """ | ||||
raise NotImplementedError | raise NotImplementedError | ||||
def phase(self, state): | def phase(self, state): | ||||
"""returns phase of specified state in the subrepository. | """returns phase of specified state in the subrepository.""" | ||||
""" | |||||
return phases.public | return phases.public | ||||
def remove(self): | def remove(self): | ||||
"""remove the subrepo | """remove the subrepo | ||||
(should verify the dirstate is not dirty first) | (should verify the dirstate is not dirty first) | ||||
""" | """ | ||||
raise NotImplementedError | raise NotImplementedError | ||||
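The methods above form the abstract subrepo contract that concrete backends (hg, svn, git) override. A toy subclass shows the shape of that contract; nullsubrepo is hypothetical and not part of Mercurial:

    class nullsubrepo(object):
        """Do-nothing backend: every operation is a safe no-op."""

        def dirty(self, ignoreupdate=False, missing=False):
            return False      # never has uncommitted changes

        def commit(self, text, user, date):
            return b''        # the "new state" of the subrepo

        def phase(self, state):
            return 0          # public, mirroring the base class default

        def remove(self):
            pass              # nothing on disk to clean up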
archiver.addfile( | archiver.addfile( | ||||
prefix + name, mode, symlink, self.filedata(name, decode) | prefix + name, mode, symlink, self.filedata(name, decode) | ||||
) | ) | ||||
progress.increment() | progress.increment() | ||||
progress.complete() | progress.complete() | ||||
return total | return total | ||||
def walk(self, match): | def walk(self, match): | ||||
''' | """ | ||||
walk recursively through the directory tree, finding all files | walk recursively through the directory tree, finding all files | ||||
matched by the match function | matched by the match function | ||||
''' | """ | ||||
def forget(self, match, prefix, uipathfn, dryrun, interactive): | def forget(self, match, prefix, uipathfn, dryrun, interactive): | ||||
return ([], []) | return ([], []) | ||||
def removefiles( | def removefiles( | ||||
self, | self, | ||||
matcher, | matcher, | ||||
prefix, | prefix, | ||||
% (substate[0], substate[2]) | % (substate[0], substate[2]) | ||||
) | ) | ||||
return [] | return [] | ||||
def shortid(self, revid): | def shortid(self, revid): | ||||
return revid | return revid | ||||
def unshare(self): | def unshare(self): | ||||
''' | """ | ||||
convert this repository from shared to normal storage. | convert this repository from shared to normal storage. | ||||
''' | """ | ||||
def verify(self, onpush=False): | def verify(self, onpush=False): | ||||
"""verify the revision of this repository that is held in `_state` is | """verify the revision of this repository that is held in `_state` is | ||||
present and not hidden. Return 0 on success or warning, 1 on any | present and not hidden. Return 0 on success or warning, 1 on any | ||||
error. In the case of ``onpush``, warnings or errors will raise an | error. In the case of ``onpush``, warnings or errors will raise an | ||||
exception if the result of pushing would be a broken remote repository. | exception if the result of pushing would be a broken remote repository. | ||||
""" | """ | ||||
return 0 | return 0 | ||||
@propertycache | @propertycache | ||||
def wvfs(self): | def wvfs(self): | ||||
"""return vfs to access the working directory of this subrepository | """return vfs to access the working directory of this subrepository""" | ||||
""" | |||||
return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path)) | return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path)) | ||||
@propertycache | @propertycache | ||||
def _relpath(self): | def _relpath(self): | ||||
"""return path to this subrepository as seen from outermost repository | """return path to this subrepository as seen from outermost repository""" | ||||
""" | |||||
return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path) | return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path) | ||||
class hgsubrepo(abstractsubrepo): | class hgsubrepo(abstractsubrepo): | ||||
def __init__(self, ctx, path, state, allowcreate): | def __init__(self, ctx, path, state, allowcreate): | ||||
super(hgsubrepo, self).__init__(ctx, path) | super(hgsubrepo, self).__init__(ctx, path) | ||||
self._state = state | self._state = state | ||||
r = ctx.repo() | r = ctx.repo() | ||||
break | break | ||||
if clean: | if clean: | ||||
# if not empty: | # if not empty: | ||||
# the cached and current pull states have a different size | # the cached and current pull states have a different size | ||||
clean = next(itercache, None) is None | clean = next(itercache, None) is None | ||||
return clean | return clean | ||||
def _calcstorehash(self, remotepath): | def _calcstorehash(self, remotepath): | ||||
'''calculate a unique "store hash" | """calculate a unique "store hash" | ||||
This method is used to detect when there are changes that may | This method is used to detect when there are changes that may | ||||
require a push to a given remote path.''' | require a push to a given remote path.""" | ||||
# sort the files that will be hashed in increasing (likely) file size | # sort the files that will be hashed in increasing (likely) file size | ||||
filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i') | filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i') | ||||
yield b'# %s\n' % _expandedabspath(remotepath) | yield b'# %s\n' % _expandedabspath(remotepath) | ||||
vfs = self._repo.vfs | vfs = self._repo.vfs | ||||
for relname in filelist: | for relname in filelist: | ||||
filehash = node.hex(hashutil.sha1(vfs.tryread(relname)).digest()) | filehash = node.hex(hashutil.sha1(vfs.tryread(relname)).digest()) | ||||
yield b'%s = %s\n' % (relname, filehash) | yield b'%s = %s\n' % (relname, filehash) | ||||
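The store hash is just a fingerprint over a few small, change-sensitive files. A self-contained sketch of the same idea, with illustrative paths and an invented helper name:

    import hashlib
    import os

    def calc_store_hash(repo_root, remotepath,
                        filelist=('bookmarks', 'store/00changelog.i')):
        yield '# %s\n' % remotepath
        for relname in filelist:
            try:
                with open(os.path.join(repo_root, relname), 'rb') as fp:
                    data = fp.read()
            except IOError:
                data = b''    # mirror vfs.tryread(): missing file == empty
            yield '%s = %s\n' % (relname, hashlib.sha1(data).hexdigest())

Comparing the cached lines against freshly computed ones is then enough to tell whether a push to that remote may be needed.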
@propertycache | @propertycache | ||||
def _cachestorehashvfs(self): | def _cachestorehashvfs(self): | ||||
return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash')) | return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash')) | ||||
def _readstorehashcache(self, remotepath): | def _readstorehashcache(self, remotepath): | ||||
'''read the store hash cache for a given remote repository''' | '''read the store hash cache for a given remote repository''' | ||||
cachefile = _getstorehashcachename(remotepath) | cachefile = _getstorehashcachename(remotepath) | ||||
return self._cachestorehashvfs.tryreadlines(cachefile, b'r') | return self._cachestorehashvfs.tryreadlines(cachefile, b'r') | ||||
def _cachestorehash(self, remotepath): | def _cachestorehash(self, remotepath): | ||||
'''cache the current store hash | """cache the current store hash | ||||
Each remote repo requires its own store hash cache, because a subrepo | Each remote repo requires its own store hash cache, because a subrepo | ||||
store may be "clean" versus a given remote repo, but not versus another | store may be "clean" versus a given remote repo, but not versus another | ||||
''' | """ | ||||
cachefile = _getstorehashcachename(remotepath) | cachefile = _getstorehashcachename(remotepath) | ||||
with self._repo.lock(): | with self._repo.lock(): | ||||
storehash = list(self._calcstorehash(remotepath)) | storehash = list(self._calcstorehash(remotepath)) | ||||
vfs = self._cachestorehashvfs | vfs = self._cachestorehashvfs | ||||
vfs.writelines(cachefile, storehash, mode=b'wb', notindexed=True) | vfs.writelines(cachefile, storehash, mode=b'wb', notindexed=True) | ||||
def _getctx(self): | def _getctx(self): | ||||
'''fetch the context for this subrepo revision, possibly a workingctx | """fetch the context for this subrepo revision, possibly a workingctx""" | ||||
''' | |||||
if self._ctx.rev() is None: | if self._ctx.rev() is None: | ||||
return self._repo[None] # workingctx if parent is workingctx | return self._repo[None] # workingctx if parent is workingctx | ||||
else: | else: | ||||
rev = self._state[1] | rev = self._state[1] | ||||
return self._repo[rev] | return self._repo[rev] | ||||
@annotatesubrepoerror | @annotatesubrepoerror | ||||
def _initrepo(self, parentrepo, source, create): | def _initrepo(self, parentrepo, source, create): | ||||
if onpush: | if onpush: | ||||
raise error.Abort(msg) | raise error.Abort(msg) | ||||
else: | else: | ||||
self._repo.ui.warn(b'%s\n' % msg) | self._repo.ui.warn(b'%s\n' % msg) | ||||
return 0 | return 0 | ||||
@propertycache | @propertycache | ||||
def wvfs(self): | def wvfs(self): | ||||
"""return own wvfs for efficiency and consistency | """return own wvfs for efficiency and consistency""" | ||||
""" | |||||
return self._repo.wvfs | return self._repo.wvfs | ||||
@propertycache | @propertycache | ||||
def _relpath(self): | def _relpath(self): | ||||
"""return path to this subrepository as seen from outermost repository | """return path to this subrepository as seen from outermost repository""" | ||||
""" | |||||
# Keep consistent dir separators by avoiding vfs.join(self._path) | # Keep consistent dir separators by avoiding vfs.join(self._path) | ||||
return reporelpath(self._repo) | return reporelpath(self._repo) | ||||
class svnsubrepo(abstractsubrepo): | class svnsubrepo(abstractsubrepo): | ||||
def __init__(self, ctx, path, state, allowcreate): | def __init__(self, ctx, path, state, allowcreate): | ||||
super(svnsubrepo, self).__init__(ctx, path) | super(svnsubrepo, self).__init__(ctx, path) | ||||
self._state = state | self._state = state | ||||
continue | continue | ||||
item = s[0].getAttribute('item') | item = s[0].getAttribute('item') | ||||
props = s[0].getAttribute('props') | props = s[0].getAttribute('props') | ||||
path = e.getAttribute('path').encode('utf8') | path = e.getAttribute('path').encode('utf8') | ||||
if item == 'external': | if item == 'external': | ||||
externals.append(path) | externals.append(path) | ||||
elif item == 'missing': | elif item == 'missing': | ||||
missing.append(path) | missing.append(path) | ||||
if item not in ( | if ( | ||||
| item | ||||
| not in ( | ||||
'', | '', | ||||
'normal', | 'normal', | ||||
'unversioned', | 'unversioned', | ||||
'external', | 'external', | ||||
) or props not in ('', 'none', 'normal'): | ) | ||||
| or props not in ('', 'none', 'normal') | ||||
| ): | ||||
changes.append(path) | changes.append(path) | ||||
for path in changes: | for path in changes: | ||||
for ext in externals: | for ext in externals: | ||||
if path == ext or path.startswith(ext + pycompat.ossep): | if path == ext or path.startswith(ext + pycompat.ossep): | ||||
return True, True, bool(missing) | return True, True, bool(missing) | ||||
return bool(changes), False, bool(missing) | return bool(changes), False, bool(missing) | ||||
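The status walk above consumes the XML emitted by `svn status --xml`. A minimal minidom sketch with fabricated output shows the attributes it relies on:

    from xml.dom import minidom

    doc = minidom.parseString(
        b'<status><target><entry path="a.txt">'
        b'<wc-status item="modified" props="none"/>'
        b'</entry></target></status>'
    )
    for e in doc.getElementsByTagName('entry'):
        s = e.getElementsByTagName('wc-status')
        item = s[0].getAttribute('item')
        props = s[0].getAttribute('props')
        print(e.getAttribute('path'), item, props)   # a.txt modified none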
@annotatesubrepoerror | @annotatesubrepoerror | ||||
m = re.search(br'^git version (\d+)\.(\d+)', out) | m = re.search(br'^git version (\d+)\.(\d+)', out) | ||||
if m: | if m: | ||||
return (int(m.group(1)), int(m.group(2)), 0) | return (int(m.group(1)), int(m.group(2)), 0) | ||||
return -1 | return -1 | ||||
@staticmethod | @staticmethod | ||||
def _checkversion(out): | def _checkversion(out): | ||||
'''ensure git version is new enough | """ensure git version is new enough | ||||
>>> _checkversion = gitsubrepo._checkversion | >>> _checkversion = gitsubrepo._checkversion | ||||
>>> _checkversion(b'git version 1.6.0') | >>> _checkversion(b'git version 1.6.0') | ||||
'ok' | 'ok' | ||||
>>> _checkversion(b'git version 1.8.5') | >>> _checkversion(b'git version 1.8.5') | ||||
'ok' | 'ok' | ||||
>>> _checkversion(b'git version 1.4.0') | >>> _checkversion(b'git version 1.4.0') | ||||
'abort' | 'abort' | ||||
>>> _checkversion(b'git version 1.5.0') | >>> _checkversion(b'git version 1.5.0') | ||||
'warning' | 'warning' | ||||
>>> _checkversion(b'git version 1.9-rc0') | >>> _checkversion(b'git version 1.9-rc0') | ||||
'ok' | 'ok' | ||||
>>> _checkversion(b'git version 1.9.0.265.g81cdec2') | >>> _checkversion(b'git version 1.9.0.265.g81cdec2') | ||||
'ok' | 'ok' | ||||
>>> _checkversion(b'git version 1.9.0.GIT') | >>> _checkversion(b'git version 1.9.0.GIT') | ||||
'ok' | 'ok' | ||||
>>> _checkversion(b'git version 12345') | >>> _checkversion(b'git version 12345') | ||||
'unknown' | 'unknown' | ||||
>>> _checkversion(b'no') | >>> _checkversion(b'no') | ||||
'unknown' | 'unknown' | ||||
''' | """ | ||||
version = gitsubrepo._gitversion(out) | version = gitsubrepo._gitversion(out) | ||||
# git 1.4.0 can't work at all, but 1.5.X can in at least some cases, | # git 1.4.0 can't work at all, but 1.5.X can in at least some cases, | ||||
# despite the docstring comment. For now, error on 1.4.0, warn on | # despite the docstring comment. For now, error on 1.4.0, warn on | ||||
# 1.5.0 but attempt to continue. | # 1.5.0 but attempt to continue. | ||||
if version == -1: | if version == -1: | ||||
return b'unknown' | return b'unknown' | ||||
if version < (1, 5, 0): | if version < (1, 5, 0): | ||||
return b'abort' | return b'abort' | ||||
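The probe above, restated as standalone code; the function names are ours, and the thresholds mirror the docstring and comment (abort below 1.5, warn below 1.6):

    import re

    def git_version(out):
        m = re.search(r'^git version (\d+)\.(\d+)', out)
        return (int(m.group(1)), int(m.group(2)), 0) if m else -1

    def check_version(out):
        v = git_version(out)
        if v == -1:
            return 'unknown'
        if v < (1, 5, 0):
            return 'abort'
        if v < (1, 6, 0):
            return 'warning'
        return 'ok'

    assert check_version('git version 1.8.5') == 'ok'
    assert check_version('git version 1.4.0') == 'abort'
    assert check_version('git version 1.5.0') == 'warning'
    assert check_version('no') == 'unknown'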
def _gitupdatestat(self): | def _gitupdatestat(self): | ||||
"""This must be run before git diff-index. | """This must be run before git diff-index. | ||||
diff-index only looks at changes to file stat; | diff-index only looks at changes to file stat; | ||||
this command looks at file contents and updates the stat.""" | this command looks at file contents and updates the stat.""" | ||||
self._gitcommand([b'update-index', b'-q', b'--refresh']) | self._gitcommand([b'update-index', b'-q', b'--refresh']) | ||||
def _gitbranchmap(self): | def _gitbranchmap(self): | ||||
'''returns 2 things: | """returns 2 things: | ||||
a map from git branch to revision | a map from git branch to revision | ||||
a map from revision to branches''' | a map from revision to branches""" | ||||
branch2rev = {} | branch2rev = {} | ||||
rev2branch = {} | rev2branch = {} | ||||
out = self._gitcommand( | out = self._gitcommand( | ||||
[b'for-each-ref', b'--format', b'%(objectname) %(refname)'] | [b'for-each-ref', b'--format', b'%(objectname) %(refname)'] | ||||
) | ) | ||||
for line in out.split(b'\n'): | for line in out.split(b'\n'): | ||||
revision, ref = line.split(b' ') | revision, ref = line.split(b' ') |
tags as tagsmod, | tags as tagsmod, | ||||
util, | util, | ||||
) | ) | ||||
hexnullid = hex(nullid) | hexnullid = hex(nullid) | ||||
def readtagsformerge(ui, repo, lines, fn=b'', keeplinenums=False): | def readtagsformerge(ui, repo, lines, fn=b'', keeplinenums=False): | ||||
'''read the .hgtags file into a structure that is suitable for merging | """read the .hgtags file into a structure that is suitable for merging | ||||
Unless the keeplinenums flag is set, clear the line numbers associated | Unless the keeplinenums flag is set, clear the line numbers associated | ||||
with each tag. This is done because only the line numbers of the first | with each tag. This is done because only the line numbers of the first | ||||
parent are useful for merging. | parent are useful for merging. | ||||
''' | """ | ||||
filetags = tagsmod._readtaghist( | filetags = tagsmod._readtaghist( | ||||
ui, repo, lines, fn=fn, recode=None, calcnodelines=True | ui, repo, lines, fn=fn, recode=None, calcnodelines=True | ||||
)[1] | )[1] | ||||
for tagname, taginfo in filetags.items(): | for tagname, taginfo in filetags.items(): | ||||
if not keeplinenums: | if not keeplinenums: | ||||
for el in taginfo: | for el in taginfo: | ||||
el[1] = None | el[1] = None | ||||
return filetags | return filetags | ||||
def grouptagnodesbyline(tagnodes): | def grouptagnodesbyline(tagnodes): | ||||
''' | """ | ||||
Group nearby nodes (i.e. those that must be written next to each other) | Group nearby nodes (i.e. those that must be written next to each other) | ||||
The input is a list of [node, position] pairs, corresponding to a given tag | The input is a list of [node, position] pairs, corresponding to a given tag | ||||
The position is the line number where the node was found in the first parent's | The position is the line number where the node was found in the first parent's | ||||
.hgtags file, or None for those nodes that came from the base or the second | .hgtags file, or None for those nodes that came from the base or the second | ||||
parent .hgtags files. | parent .hgtags files. | ||||
This function groups those [node, position] pairs, returning a list of | This function groups those [node, position] pairs, returning a list of | ||||
groups of nodes that must be written next to each other because their | groups of nodes that must be written next to each other because their | ||||
positions are consecutive or have no position preference (because their | positions are consecutive or have no position preference (because their | ||||
position is None). | position is None). | ||||
The result is a list of [position, [consecutive node list]] | The result is a list of [position, [consecutive node list]] | ||||
''' | """ | ||||
firstlinenum = None | firstlinenum = None | ||||
for hexnode, linenum in tagnodes: | for hexnode, linenum in tagnodes: | ||||
firstlinenum = linenum | firstlinenum = linenum | ||||
if firstlinenum is not None: | if firstlinenum is not None: | ||||
break | break | ||||
if firstlinenum is None: | if firstlinenum is None: | ||||
return [[None, [el[0] for el in tagnodes]]] | return [[None, [el[0] for el in tagnodes]]] | ||||
tagnodes[0][1] = firstlinenum | tagnodes[0][1] = firstlinenum | ||||
groupednodes = [[firstlinenum, []]] | groupednodes = [[firstlinenum, []]] | ||||
prevlinenum = firstlinenum | prevlinenum = firstlinenum | ||||
for hexnode, linenum in tagnodes: | for hexnode, linenum in tagnodes: | ||||
if linenum is not None and linenum - prevlinenum > 1: | if linenum is not None and linenum - prevlinenum > 1: | ||||
groupednodes.append([linenum, []]) | groupednodes.append([linenum, []]) | ||||
groupednodes[-1][1].append(hexnode) | groupednodes[-1][1].append(hexnode) | ||||
if linenum is not None: | if linenum is not None: | ||||
prevlinenum = linenum | prevlinenum = linenum | ||||
return groupednodes | return groupednodes | ||||
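A standalone rendition of the grouping rule, equivalent in spirit to the function above rather than a copy of it:

    def group_by_line(tagnodes):
        groups, prev = [], None
        for node, linenum in tagnodes:
            # start a new group when the line number jumps by more than 1;
            # None means "no position preference", so it joins the group
            if prev is None or (linenum is not None and linenum - prev > 1):
                groups.append([linenum, []])
            groups[-1][1].append(node)
            if linenum is not None:
                prev = linenum
        return groups

    print(group_by_line([('n1', 10), ('n2', 11), ('n3', None), ('n4', 20)]))
    # -> [[10, ['n1', 'n2', 'n3']], [20, ['n4']]]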
def writemergedtags(fcd, mergedtags): | def writemergedtags(fcd, mergedtags): | ||||
''' | """ | ||||
write the merged tags while trying to minimize the diff to the first parent | write the merged tags while trying to minimize the diff to the first parent | ||||
This function uses the ordering info stored on the merged tags dict to | This function uses the ordering info stored on the merged tags dict to | ||||
generate an .hgtags file which is correct (in the sense that its contents | generate an .hgtags file which is correct (in the sense that its contents | ||||
correspond to the result of the tag merge) while also being as close as | correspond to the result of the tag merge) while also being as close as | ||||
possible to the first parent's .hgtags file. | possible to the first parent's .hgtags file. | ||||
''' | """ | ||||
# group the node-tag pairs that must be written next to each other | # group the node-tag pairs that must be written next to each other | ||||
for tname, taglist in list(mergedtags.items()): | for tname, taglist in list(mergedtags.items()): | ||||
mergedtags[tname] = grouptagnodesbyline(taglist) | mergedtags[tname] = grouptagnodesbyline(taglist) | ||||
# convert the grouped merged tags dict into a format that resembles the | # convert the grouped merged tags dict into a format that resembles the | ||||
# final .hgtags file (i.e. a list of blocks of 'node tag' pairs) | # final .hgtags file (i.e. a list of blocks of 'node tag' pairs) | ||||
def taglist2string(tlist, tname): | def taglist2string(tlist, tname): | ||||
return b'\n'.join([b'%s %s' % (hexnode, tname) for hexnode in tlist]) | return b'\n'.join([b'%s %s' % (hexnode, tname) for hexnode in tlist]) | ||||
# finally we can join the sorted groups to get the final contents of the | # finally we can join the sorted groups to get the final contents of the | ||||
# merged .hgtags file, and then write it to disk | # merged .hgtags file, and then write it to disk | ||||
mergedtagstring = b'\n'.join([tags for rank, tags in finaltags if tags]) | mergedtagstring = b'\n'.join([tags for rank, tags in finaltags if tags]) | ||||
fcd.write(mergedtagstring + b'\n', fcd.flags()) | fcd.write(mergedtagstring + b'\n', fcd.flags()) | ||||
def singletagmerge(p1nodes, p2nodes): | def singletagmerge(p1nodes, p2nodes): | ||||
''' | """ | ||||
merge the nodes corresponding to a single tag | merge the nodes corresponding to a single tag | ||||
Note that the inputs are lists of node-linenum pairs (i.e. not just lists | Note that the inputs are lists of node-linenum pairs (i.e. not just lists | ||||
of nodes) | of nodes) | ||||
''' | """ | ||||
if not p2nodes: | if not p2nodes: | ||||
return p1nodes | return p1nodes | ||||
if not p1nodes: | if not p1nodes: | ||||
return p2nodes | return p2nodes | ||||
# there is no conflict unless both tags point to different revisions | # there is no conflict unless both tags point to different revisions | ||||
# and have a non identical tag history | # and have a non identical tag history | ||||
p1currentnode = p1nodes[-1][0] | p1currentnode = p1nodes[-1][0] | ||||
# - non common lowest ranking nodes | # - non common lowest ranking nodes | ||||
# - non common highest ranking nodes | # - non common highest ranking nodes | ||||
# note that the common nodes plus the non common lowest ranking nodes is the | # note that the common nodes plus the non common lowest ranking nodes is the | ||||
# whole list of lr nodes | # whole list of lr nodes | ||||
return lrnodes + hrnodes[commonidx:] | return lrnodes + hrnodes[commonidx:] | ||||
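That final line is the whole merge rule: the lower-ranking history plus the higher-ranking nodes outside the common prefix. A worked example with a simplified common-prefix computation (Mercurial's actual index calculation differs in detail):

    lrnodes = [('a', 1), ('b', 2), ('c', 3)]           # lower-ranking side
    hrnodes = [('a', None), ('b', None), ('d', None)]  # higher-ranking side

    # common prefix is a, b -> commonidx == 2
    commonidx = 0
    for (n1, _), (n2, _) in zip(lrnodes, hrnodes):
        if n1 != n2:
            break
        commonidx += 1

    merged = lrnodes + hrnodes[commonidx:]
    print(merged)   # [('a', 1), ('b', 2), ('c', 3), ('d', None)]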
def merge(repo, fcd, fco, fca): | def merge(repo, fcd, fco, fca): | ||||
''' | """ | ||||
Merge the tags of two revisions, taking into account the base tags | Merge the tags of two revisions, taking into account the base tags | ||||
Try to minimize the diff between the merged tags and the first parent tags | Try to minimize the diff between the merged tags and the first parent tags | ||||
''' | """ | ||||
ui = repo.ui | ui = repo.ui | ||||
# read the p1, p2 and base tags | # read the p1, p2 and base tags | ||||
# only keep the line numbers for the p1 tags | # only keep the line numbers for the p1 tags | ||||
p1tags = readtagsformerge( | p1tags = readtagsformerge( | ||||
ui, repo, fcd.data().splitlines(), fn=b"p1 tags", keeplinenums=True | ui, repo, fcd.data().splitlines(), fn=b"p1 tags", keeplinenums=True | ||||
) | ) | ||||
p2tags = readtagsformerge( | p2tags = readtagsformerge( | ||||
ui, repo, fco.data().splitlines(), fn=b"p2 tags", keeplinenums=False | ui, repo, fco.data().splitlines(), fn=b"p2 tags", keeplinenums=False |
elif new is None: | elif new is None: | ||||
fp.write(remove % (old, tag)) | fp.write(remove % (old, tag)) | ||||
else: | else: | ||||
fp.write(updateold % (old, tag)) | fp.write(updateold % (old, tag)) | ||||
fp.write(updatenew % (new, tag)) | fp.write(updatenew % (new, tag)) | ||||
def findglobaltags(ui, repo): | def findglobaltags(ui, repo): | ||||
'''Find global tags in a repo: return a tagsmap | """Find global tags in a repo: return a tagsmap | ||||
tagsmap: tag name to (node, hist) 2-tuples. | tagsmap: tag name to (node, hist) 2-tuples. | ||||
The tags cache is read and updated as a side-effect of calling. | The tags cache is read and updated as a side-effect of calling. | ||||
''' | """ | ||||
(heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo) | (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo) | ||||
if cachetags is not None: | if cachetags is not None: | ||||
assert not shouldwrite | assert not shouldwrite | ||||
# XXX is this really 100% correct? are there oddball special | # XXX is this really 100% correct? are there oddball special | ||||
# cases where a global tag should outrank a local tag but won't, | # cases where a global tag should outrank a local tag but won't, | ||||
# because cachetags does not contain rank info? | # because cachetags does not contain rank info? | ||||
alltags = {} | alltags = {} | ||||
_updatetags(cachetags, alltags) | _updatetags(cachetags, alltags) | ||||
cl.rev(filetags[t][0]) | cl.rev(filetags[t][0]) | ||||
except (LookupError, ValueError): | except (LookupError, ValueError): | ||||
del filetags[t] | del filetags[t] | ||||
_updatetags(filetags, alltags, b'local', tagtypes) | _updatetags(filetags, alltags, b'local', tagtypes) | ||||
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False): | def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False): | ||||
'''Read tag definitions from a file (or any source of lines). | """Read tag definitions from a file (or any source of lines). | ||||
This function returns two sortdicts with similar information: | This function returns two sortdicts with similar information: | ||||
- the first dict, bintaghist, contains the tag information as expected by | - the first dict, bintaghist, contains the tag information as expected by | ||||
the _readtags function, i.e. a mapping from tag name to (node, hist): | the _readtags function, i.e. a mapping from tag name to (node, hist): | ||||
- node is the node id from the last line read for that name, | - node is the node id from the last line read for that name, | ||||
- hist is the list of node ids previously associated with it (in file | - hist is the list of node ids previously associated with it (in file | ||||
order). All node ids are binary, not hex. | order). All node ids are binary, not hex. | ||||
- the second dict, hextaglines, is a mapping from tag name to a list of | - the second dict, hextaglines, is a mapping from tag name to a list of | ||||
[hexnode, line number] pairs, ordered from the oldest to the newest node. | [hexnode, line number] pairs, ordered from the oldest to the newest node. | ||||
When calcnodelines is False the hextaglines dict is not calculated (an | When calcnodelines is False the hextaglines dict is not calculated (an | ||||
empty dict is returned). This is done to improve this function's | empty dict is returned). This is done to improve this function's | ||||
performance in cases where the line numbers are not needed. | performance in cases where the line numbers are not needed. | ||||
''' | """ | ||||
bintaghist = util.sortdict() | bintaghist = util.sortdict() | ||||
hextaglines = util.sortdict() | hextaglines = util.sortdict() | ||||
count = 0 | count = 0 | ||||
def dbg(msg): | def dbg(msg): | ||||
ui.debug(b"%s, line %d: %s\n" % (fn, count, msg)) | ui.debug(b"%s, line %d: %s\n" % (fn, count, msg)) | ||||
# map tag name to (node, hist) | # map tag name to (node, hist) | ||||
if name not in bintaghist: | if name not in bintaghist: | ||||
bintaghist[name] = [] | bintaghist[name] = [] | ||||
bintaghist[name].append(nodebin) | bintaghist[name].append(nodebin) | ||||
return bintaghist, hextaglines | return bintaghist, hextaglines | ||||
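A hedged sketch of the parsing contract the docstring above describes; nodes are kept as hex here, whereas the real function converts them to binary:

    def read_tag_lines(lines):
        nodes_by_name, lines_by_name = {}, {}
        for count, line in enumerate(lines, 1):
            line = line.strip()
            if not line:
                continue
            hexnode, name = line.split(b' ', 1)   # ".hgtags" is "node name"
            lines_by_name.setdefault(name, []).append([hexnode, count])
            nodes_by_name.setdefault(name, []).append(hexnode)
        return nodes_by_name, lines_by_name

    tags, hist = read_tag_lines([b'0123abcd tip-tag', b'4567ef01 tip-tag'])
    print(tags[b'tip-tag'])   # [b'0123abcd', b'4567ef01']; last is current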
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False): | def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False): | ||||
'''Read tag definitions from a file (or any source of lines). | """Read tag definitions from a file (or any source of lines). | ||||
Returns a mapping from tag name to (node, hist). | Returns a mapping from tag name to (node, hist). | ||||
"node" is the node id from the last line read for that name. "hist" | "node" is the node id from the last line read for that name. "hist" | ||||
is the list of node ids previously associated with it (in file order). | is the list of node ids previously associated with it (in file order). | ||||
All node ids are binary, not hex. | All node ids are binary, not hex. | ||||
''' | """ | ||||
filetags, nodelines = _readtaghist( | filetags, nodelines = _readtaghist( | ||||
ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines | ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines | ||||
) | ) | ||||
# util.sortdict().__setitem__ is much slower at replacing than inserting | # util.sortdict().__setitem__ is much slower at replacing than inserting | ||||
# new entries. The difference can matter if there are thousands of tags. | # new entries. The difference can matter if there are thousands of tags. | ||||
# Create a new sortdict to avoid the performance penalty. | # Create a new sortdict to avoid the performance penalty. | ||||
newtags = util.sortdict() | newtags = util.sortdict() | ||||
for tag, taghist in filetags.items(): | for tag, taghist in filetags.items(): | ||||
"""name of a tagcache file for a given repo or repoview""" | """name of a tagcache file for a given repo or repoview""" | ||||
filename = b'tags2' | filename = b'tags2' | ||||
if repo.filtername: | if repo.filtername: | ||||
filename = b'%s-%s' % (filename, repo.filtername) | filename = b'%s-%s' % (filename, repo.filtername) | ||||
return filename | return filename | ||||
def _readtagcache(ui, repo): | def _readtagcache(ui, repo): | ||||
'''Read the tag cache. | """Read the tag cache. | ||||
Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite). | Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite). | ||||
If the cache is completely up-to-date, "cachetags" is a dict of the | If the cache is completely up-to-date, "cachetags" is a dict of the | ||||
form returned by _readtags() and "heads", "fnodes", and "validinfo" are | form returned by _readtags() and "heads", "fnodes", and "validinfo" are | ||||
None and "shouldwrite" is False. | None and "shouldwrite" is False. | ||||
If the cache is not up to date, "cachetags" is None. "heads" is a list | If the cache is not up to date, "cachetags" is None. "heads" is a list | ||||
of all heads currently in the repository, ordered from tip to oldest. | of all heads currently in the repository, ordered from tip to oldest. | ||||
"validinfo" is a tuple describing cache validation info. This is used | "validinfo" is a tuple describing cache validation info. This is used | ||||
when writing the tags cache. "fnodes" is a mapping from head to .hgtags | when writing the tags cache. "fnodes" is a mapping from head to .hgtags | ||||
filenode. "shouldwrite" is True. | filenode. "shouldwrite" is True. | ||||
If the cache is not up to date, the caller is responsible for reading tag | If the cache is not up to date, the caller is responsible for reading tag | ||||
info from each returned head. (See findglobaltags().) | info from each returned head. (See findglobaltags().) | ||||
''' | """ | ||||
try: | try: | ||||
cachefile = repo.cachevfs(_filename(repo), b'r') | cachefile = repo.cachevfs(_filename(repo), b'r') | ||||
# force reading the file for static-http | # force reading the file for static-http | ||||
cachelines = iter(cachefile) | cachelines = iter(cachefile) | ||||
except IOError: | except IOError: | ||||
cachefile = None | cachefile = None | ||||
cacherev = None | cacherev = None | ||||
try: | try: | ||||
cachefile.close() | cachefile.close() | ||||
except (OSError, IOError): | except (OSError, IOError): | ||||
pass | pass | ||||
def tag(repo, names, node, message, local, user, date, editor=False): | def tag(repo, names, node, message, local, user, date, editor=False): | ||||
'''tag a revision with one or more symbolic names. | """tag a revision with one or more symbolic names. | ||||
names is a list of strings or, when adding a single tag, names may be a | names is a list of strings or, when adding a single tag, names may be a | ||||
string. | string. | ||||
if local is True, the tags are stored in a per-repository file. | if local is True, the tags are stored in a per-repository file. | ||||
otherwise, they are stored in the .hgtags file, and a new | otherwise, they are stored in the .hgtags file, and a new | ||||
changeset is committed with the change. | changeset is committed with the change. | ||||
keyword arguments: | keyword arguments: | ||||
local: whether to store tags in non-version-controlled file | local: whether to store tags in non-version-controlled file | ||||
(default False) | (default False) | ||||
message: commit message to use if committing | message: commit message to use if committing | ||||
user: name of user to use if committing | user: name of user to use if committing | ||||
date: date tuple to use if committing''' | date: date tuple to use if committing""" | ||||
if not local: | if not local: | ||||
m = matchmod.exact([b'.hgtags']) | m = matchmod.exact([b'.hgtags']) | ||||
st = repo.status(match=m, unknown=True, ignored=True) | st = repo.status(match=m, unknown=True, ignored=True) | ||||
if any( | if any( | ||||
( | ( | ||||
st.modified, | st.modified, | ||||
st.added, | st.added, |
""" | """ | ||||
if websubtable: | if websubtable: | ||||
for regexp, format in websubtable: | for regexp, format in websubtable: | ||||
text = regexp.sub(format, text) | text = regexp.sub(format, text) | ||||
return text | return text | ||||
def loadfilter(ui, extname, registrarobj): | def loadfilter(ui, extname, registrarobj): | ||||
"""Load template filter from specified registrarobj | """Load template filter from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
filters[name] = func | filters[name] = func | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = filters.values() | i18nfunctions = filters.values() |
tokens = text.split(splitter) | tokens = text.split(splitter) | ||||
if num >= len(tokens) or num < -len(tokens): | if num >= len(tokens) or num < -len(tokens): | ||||
return b'' | return b'' | ||||
else: | else: | ||||
return tokens[num] | return tokens[num] | ||||
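The bounds check above in isolation: out-of-range indexes, positive or negative, yield an empty string instead of raising IndexError. A minimal sketch:

    def get_token(text, num, splitter=b' '):
        tokens = text.split(splitter)
        if num >= len(tokens) or num < -len(tokens):
            return b''    # out of range in either direction
        return tokens[num]

    assert get_token(b'a b c', 1) == b'b'
    assert get_token(b'a b c', -1) == b'c'
    assert get_token(b'a b c', 5) == b''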
def loadfunction(ui, extname, registrarobj): | def loadfunction(ui, extname, registrarobj): | ||||
"""Load template function from specified registrarobj | """Load template function from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
funcs[name] = func | funcs[name] = func | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = funcs.values() | i18nfunctions = funcs.values() |
tmpl = ( | tmpl = ( | ||||
b'{instability}:{if(divergentnodes, " ")}{divergentnodes} ' | b'{instability}:{if(divergentnodes, " ")}{divergentnodes} ' | ||||
b'{reason} {node|short}' | b'{reason} {node|short}' | ||||
) | ) | ||||
return templateutil.mappinglist(entries, tmpl=tmpl, sep=b'\n') | return templateutil.mappinglist(entries, tmpl=tmpl, sep=b'\n') | ||||
def loadkeyword(ui, extname, registrarobj): | def loadkeyword(ui, extname, registrarobj): | ||||
"""Load template keyword from specified registrarobj | """Load template keyword from specified registrarobj""" | ||||
""" | |||||
for name, func in pycompat.iteritems(registrarobj._table): | for name, func in pycompat.iteritems(registrarobj._table): | ||||
keywords[name] = func | keywords[name] = func | ||||
# tell hggettext to extract docstrings from these functions: | # tell hggettext to extract docstrings from these functions: | ||||
i18nfunctions = keywords.values() | i18nfunctions = keywords.values() |
def lookup(self, mapping, key): | def lookup(self, mapping, key): | ||||
return None | return None | ||||
def populatemap(self, context, origmapping, newmapping): | def populatemap(self, context, origmapping, newmapping): | ||||
return {} | return {} | ||||
class engine(object): | class engine(object): | ||||
'''template expansion engine. | """template expansion engine. | ||||
template expansion works like this. a map file contains key=value | template expansion works like this. a map file contains key=value | ||||
pairs. if value is quoted, it is treated as string. otherwise, it | pairs. if value is quoted, it is treated as string. otherwise, it | ||||
is treated as name of template file. | is treated as name of template file. | ||||
templater is asked to expand a key in map. it looks up key, and | templater is asked to expand a key in map. it looks up key, and | ||||
looks for strings like this: {foo}. it expands {foo} by looking up | looks for strings like this: {foo}. it expands {foo} by looking up | ||||
foo in map, and substituting it. expansion is recursive: it stops | foo in map, and substituting it. expansion is recursive: it stops | ||||
when there is no more {foo} to replace. | when there is no more {foo} to replace. | ||||
expansion also allows formatting and filtering. | expansion also allows formatting and filtering. | ||||
format uses key to expand each item in list. syntax is | format uses key to expand each item in list. syntax is | ||||
{key%format}. | {key%format}. | ||||
filter uses function to transform value. syntax is | filter uses function to transform value. syntax is | ||||
{key|filter1|filter2|...}.''' | {key|filter1|filter2|...}.""" | ||||
def __init__(self, loader, filters=None, defaults=None, resources=None): | def __init__(self, loader, filters=None, defaults=None, resources=None): | ||||
self._loader = loader | self._loader = loader | ||||
if filters is None: | if filters is None: | ||||
filters = {} | filters = {} | ||||
self._filters = filters | self._filters = filters | ||||
self._funcs = templatefuncs.funcs # make this a parameter if needed | self._funcs = templatefuncs.funcs # make this a parameter if needed | ||||
if defaults is None: | if defaults is None: | ||||
"""Load, parse, and cache the specified template if available""" | """Load, parse, and cache the specified template if available""" | ||||
try: | try: | ||||
self._load(t) | self._load(t) | ||||
return True | return True | ||||
except templateutil.TemplateNotFound: | except templateutil.TemplateNotFound: | ||||
return False | return False | ||||
def process(self, t, mapping): | def process(self, t, mapping): | ||||
'''Perform expansion. t is the name of the map element to expand. | """Perform expansion. t is the name of the map element to expand. | ||||
mapping contains added elements for use during expansion. Returns a | mapping contains added elements for use during expansion. Returns a | ||||
generator.''' | generator.""" | ||||
func, data = self._load(t) | func, data = self._load(t) | ||||
return self._expand(func, data, mapping) | return self._expand(func, data, mapping) | ||||
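The engine docstring above describes recursive {foo} expansion. A tiny model of just that part, with no %format or |filter support and no cycle detection:

    import re

    def expand(key, table):
        text = table[key]
        # keep substituting until no {name} placeholders remain
        while True:
            new = re.sub(r'\{(\w+)\}', lambda m: table[m.group(1)], text)
            if new == text:
                return new
            text = new

    table = {'changeset': 'rev {rev}: {desc}', 'rev': '42', 'desc': 'fix bug'}
    print(expand('changeset', table))   # rev 42: fix bug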
def expand(self, tmpl, mapping): | def expand(self, tmpl, mapping): | ||||
"""Perform expansion over a literal template | """Perform expansion over a literal template | ||||
No user aliases will be expanded since this is supposed to be called | No user aliases will be expanded since this is supposed to be called | ||||
with an internal template string. | with an internal template string. | ||||
dir = templatedir() | dir = templatedir() | ||||
if dir: | if dir: | ||||
abs = os.path.normpath(os.path.join(dir, rel)) | abs = os.path.normpath(os.path.join(dir, rel)) | ||||
if os.path.isfile(abs): | if os.path.isfile(abs): | ||||
subresource = util.posixfile(abs, b'rb') | subresource = util.posixfile(abs, b'rb') | ||||
if subresource: | if subresource: | ||||
data = subresource.read() | data = subresource.read() | ||||
conf.parse( | conf.parse( | ||||
abs, data, sections=sections, remap=remap, include=include, | abs, | ||||
| data, | ||||
| sections=sections, | ||||
| remap=remap, | ||||
| include=include, | ||||
) | ) | ||||
data = fp.read() | data = fp.read() | ||||
conf.parse(mapfile, data, remap={b'': b'templates'}, include=include) | conf.parse(mapfile, data, remap={b'': b'templates'}, include=include) | ||||
cache = {} | cache = {} | ||||
tmap = {} | tmap = {} | ||||
aliases = [] | aliases = [] | ||||
def templatedir(): | def templatedir(): | ||||
'''return the directory used for template files, or None.''' | '''return the directory used for template files, or None.''' | ||||
path = os.path.normpath(os.path.join(resourceutil.datapath, b'templates')) | path = os.path.normpath(os.path.join(resourceutil.datapath, b'templates')) | ||||
return path if os.path.isdir(path) else None | return path if os.path.isdir(path) else None | ||||
def open_template(name, templatepath=None): | def open_template(name, templatepath=None): | ||||
'''returns a file-like object for the given template, and its full path | """returns a file-like object for the given template, and its full path | ||||
If the name is a relative path and we're in a frozen binary, the template | If the name is a relative path and we're in a frozen binary, the template | ||||
will be read from the mercurial.templates package instead. The returned path | will be read from the mercurial.templates package instead. The returned path | ||||
will then be the relative path. | will then be the relative path. | ||||
''' | """ | ||||
# Does the name point directly to a map file? | # Does the name point directly to a map file? | ||||
if os.path.isfile(name) or os.path.isabs(name): | if os.path.isfile(name) or os.path.isabs(name): | ||||
return name, open(name, mode='rb') | return name, open(name, mode='rb') | ||||
# Does the name point to a template in the provided templatepath, or | # Does the name point to a template in the provided templatepath, or | ||||
# in mercurial/templates/ if no path was provided? | # in mercurial/templates/ if no path was provided? | ||||
if templatepath is None: | if templatepath is None: | ||||
templatepath = templatedir() | templatepath = templatedir() |
# Assuming a delta is stored, we shouldn't need to validate node1 in | # Assuming a delta is stored, we shouldn't need to validate node1 in | ||||
# order to retrieve node2. | # order to retrieve node2. | ||||
self.assertEqual(f.read(node2), fulltext2) | self.assertEqual(f.read(node2), fulltext2) | ||||
def testcensored(self): | def testcensored(self): | ||||
f = self._makefilefn() | f = self._makefilefn() | ||||
stored1 = storageutil.packmeta({b'censored': b'tombstone',}, b'') | stored1 = storageutil.packmeta( | ||||
| { | ||||
| b'censored': b'tombstone', | ||||
| }, | ||||
| b'', | ||||
| ) | ||||
with self._maketransactionfn() as tr: | with self._maketransactionfn() as tr: | ||||
node0 = f.add(b'foo', None, tr, 0, nullid, nullid) | node0 = f.add(b'foo', None, tr, 0, nullid, nullid) | ||||
# The node value doesn't matter since we can't verify it. | # The node value doesn't matter since we can't verify it. | ||||
node1 = b'\xbb' * 20 | node1 = b'\xbb' * 20 | ||||
self._addrawrevisionfn( | self._addrawrevisionfn( | ||||
f.read(1) | f.read(1) | ||||
def testcensoredrawrevision(self): | def testcensoredrawrevision(self): | ||||
# Like above, except we do the rawdata() request first to | # Like above, except we do the rawdata() request first to | ||||
# isolate revision caching behavior. | # isolate revision caching behavior. | ||||
f = self._makefilefn() | f = self._makefilefn() | ||||
stored1 = storageutil.packmeta({b'censored': b'tombstone',}, b'') | stored1 = storageutil.packmeta( | ||||
| { | ||||
| b'censored': b'tombstone', | ||||
| }, | ||||
| b'', | ||||
| ) | ||||
with self._maketransactionfn() as tr: | with self._maketransactionfn() as tr: | ||||
node0 = f.add(b'foo', None, tr, 0, nullid, nullid) | node0 = f.add(b'foo', None, tr, 0, nullid, nullid) | ||||
# The node value doesn't matter since we can't verify it. | # The node value doesn't matter since we can't verify it. | ||||
node1 = b'\xbb' * 20 | node1 = b'\xbb' * 20 | ||||
self._addrawrevisionfn( | self._addrawrevisionfn( | ||||
self.assertEqual(f.node(0), nodes[0]) | self.assertEqual(f.node(0), nodes[0]) | ||||
self.assertEqual(f.node(1), nodes[1]) | self.assertEqual(f.node(1), nodes[1]) | ||||
self.assertEqual(f.node(2), nodes[2]) | self.assertEqual(f.node(2), nodes[2]) | ||||
def testdeltaagainstcensored(self): | def testdeltaagainstcensored(self): | ||||
# Attempt to apply a delta made against a censored revision. | # Attempt to apply a delta made against a censored revision. | ||||
f = self._makefilefn() | f = self._makefilefn() | ||||
stored1 = storageutil.packmeta({b'censored': b'tombstone',}, b'') | stored1 = storageutil.packmeta( | ||||
| { | ||||
| b'censored': b'tombstone', | ||||
| }, | ||||
| b'', | ||||
| ) | ||||
with self._maketransactionfn() as tr: | with self._maketransactionfn() as tr: | ||||
node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid) | node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid) | ||||
# The node value doesn't matter since we can't verify it. | # The node value doesn't matter since we can't verify it. | ||||
node1 = b'\xbb' * 20 | node1 = b'\xbb' * 20 | ||||
self._addrawrevisionfn( | self._addrawrevisionfn( |
entries = [] | entries = [] | ||||
for l in self._file: | for l in self._file: | ||||
file, troffset = l.split(b'\0') | file, troffset = l.split(b'\0') | ||||
entries.append((file, int(troffset))) | entries.append((file, int(troffset))) | ||||
return entries | return entries | ||||
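The journal format parsed above, in miniature: one '<file>\0<offset>' record per line. The sample data is fabricated:

    lines = [b'store/00changelog.i\x001234', b'store/data/foo.i\x000']
    entries = []
    for l in lines:
        file, troffset = l.split(b'\x00')   # NUL separates name and offset
        entries.append((file, int(troffset)))
    print(entries)
    # [(b'store/00changelog.i', 1234), (b'store/data/foo.i', 0)]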
@active | @active | ||||
def replace(self, file, offset): | def replace(self, file, offset): | ||||
''' | """ | ||||
replace can only replace already committed entries | replace can only replace already committed entries | ||||
that are not pending in the queue | that are not pending in the queue | ||||
''' | """ | ||||
if file in self._newfiles: | if file in self._newfiles: | ||||
if not offset: | if not offset: | ||||
return | return | ||||
self._newfiles.remove(file) | self._newfiles.remove(file) | ||||
self._offsetmap[file] = offset | self._offsetmap[file] = offset | ||||
elif file in self._offsetmap: | elif file in self._offsetmap: | ||||
if not offset: | if not offset: | ||||
del self._offsetmap[file] | del self._offsetmap[file] | ||||
Category is a unique identifier to allow overwriting an old callback | Category is a unique identifier to allow overwriting an old callback | ||||
with a newer callback. | with a newer callback. | ||||
""" | """ | ||||
self._pendingcallback[category] = callback | self._pendingcallback[category] = callback | ||||
@active | @active | ||||
def writepending(self): | def writepending(self): | ||||
'''write pending file to temporary version | """write pending file to temporary version | ||||
This is used to allow hooks to view a transaction before commit''' | This is used to allow hooks to view a transaction before commit""" | ||||
categories = sorted(self._pendingcallback) | categories = sorted(self._pendingcallback) | ||||
for cat in categories: | for cat in categories: | ||||
# remove callback since the data will have been flushed | # remove callback since the data will have been flushed | ||||
any = self._pendingcallback.pop(cat)(self) | any = self._pendingcallback.pop(cat)(self) | ||||
self._anypending = self._anypending or any | self._anypending = self._anypending or any | ||||
self._anypending |= self._generatefiles(suffix=b'.pending') | self._anypending |= self._generatefiles(suffix=b'.pending') | ||||
return self._anypending | return self._anypending | ||||
@active | @active | ||||
def hasfinalize(self, category): | def hasfinalize(self, category): | ||||
"""check is a callback already exist for a category | """check is a callback already exist for a category""" | ||||
""" | |||||
return category in self._finalizecallback | return category in self._finalizecallback | ||||
@active | @active | ||||
def addfinalize(self, category, callback): | def addfinalize(self, category, callback): | ||||
"""add a callback to be called when the transaction is closed | """add a callback to be called when the transaction is closed | ||||
The transaction will be given as callback's first argument. | The transaction will be given as callback's first argument. | ||||
Category is a unique identifier to allow overwriting an old callback | Category is a unique identifier to allow overwriting an old callback | ||||
with a newer callback. | with a newer callback. | ||||
""" | """ | ||||
self._abortcallback[category] = callback | self._abortcallback[category] = callback | ||||
@active | @active | ||||
def addvalidator(self, category, callback): | def addvalidator(self, category, callback): | ||||
""" adds a callback to be called when validating the transaction. | """adds a callback to be called when validating the transaction. | ||||
The transaction will be given as the first argument to the callback. | The transaction will be given as the first argument to the callback. | ||||
The callback should raise an exception to abort the transaction. """ | The callback should raise an exception to abort the transaction.""" | ||||
self._validatecallback[category] = callback | self._validatecallback[category] = callback | ||||
@active | @active | ||||
def close(self): | def close(self): | ||||
'''commit the transaction''' | '''commit the transaction''' | ||||
if self._count == 1: | if self._count == 1: | ||||
for category in sorted(self._validatecallback): | for category in sorted(self._validatecallback): | ||||
self._validatecallback[category](self) | self._validatecallback[category](self) | ||||
categories = sorted(self._postclosecallback) | categories = sorted(self._postclosecallback) | ||||
for cat in categories: | for cat in categories: | ||||
self._postclosecallback[cat](self) | self._postclosecallback[cat](self) | ||||
# Prevent double usage and help clear cycles. | # Prevent double usage and help clear cycles. | ||||
self._postclosecallback = None | self._postclosecallback = None | ||||
@active | @active | ||||
def abort(self): | def abort(self): | ||||
'''abort the transaction (generally called on error, or when the | """abort the transaction (generally called on error, or when the | ||||
transaction is not explicitly committed before going out of | transaction is not explicitly committed before going out of | ||||
scope)''' | scope)""" | ||||
self._abort() | self._abort() | ||||
def _writeundo(self): | def _writeundo(self): | ||||
"""write transaction data for possible future undo call""" | """write transaction data for possible future undo call""" | ||||
if self._undoname is None: | if self._undoname is None: | ||||
return | return | ||||
undobackupfile = self._opener.open( | undobackupfile = self._opener.open( | ||||
b"%s.backupfiles" % self._undoname, b'w' | b"%s.backupfiles" % self._undoname, b'w' |
reqcnt += 1 | reqcnt += 1 | ||||
progress.increment() | progress.increment() | ||||
repo.ui.debug( | repo.ui.debug( | ||||
b"request %d: %s\n" % (reqcnt, b" ".join(map(short, r))) | b"request %d: %s\n" % (reqcnt, b" ".join(map(short, r))) | ||||
) | ) | ||||
for p in pycompat.xrange(0, len(r), 10): | for p in pycompat.xrange(0, len(r), 10): | ||||
with remote.commandexecutor() as e: | with remote.commandexecutor() as e: | ||||
branches = e.callcommand( | branches = e.callcommand( | ||||
b'branches', {b'nodes': r[p : p + 10],} | b'branches', | ||||
| { | ||||
| b'nodes': r[p : p + 10], | ||||
| }, | ||||
).result() | ).result() | ||||
for b in branches: | for b in branches: | ||||
repo.ui.debug( | repo.ui.debug( | ||||
b"received %s:%s\n" % (short(b[0]), short(b[1])) | b"received %s:%s\n" % (short(b[0]), short(b[1])) | ||||
) | ) | ||||
unknown.append(b) | unknown.append(b) | ||||
# txnutil.py - transaction related utilities | # txnutil.py - transaction related utilities | ||||
# | # | ||||
# Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others | # Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others | ||||
# | # | ||||
# This software may be used and distributed according to the terms of the | # This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | # GNU General Public License version 2 or any later version. | ||||
from __future__ import absolute_import | from __future__ import absolute_import | ||||
import errno | import errno | ||||
from . import encoding | from . import encoding | ||||
def mayhavepending(root): | def mayhavepending(root): | ||||
'''return whether 'root' may have pending changes, which are | """return whether 'root' may have pending changes, which are | ||||
visible to this process. | visible to this process. | ||||
''' | """ | ||||
return root == encoding.environ.get(b'HG_PENDING') | return root == encoding.environ.get(b'HG_PENDING') | ||||
def trypending(root, vfs, filename, **kwargs): | def trypending(root, vfs, filename, **kwargs): | ||||
'''Open file to be read according to HG_PENDING environment variable | """Open file to be read according to HG_PENDING environment variable | ||||
This opens the '.pending' version of the specified 'filename' only | This opens the '.pending' version of the specified 'filename' only | ||||
when HG_PENDING is equal to 'root'. | when HG_PENDING is equal to 'root'. | ||||
This returns a '(fp, is_pending_opened)' tuple. | This returns a '(fp, is_pending_opened)' tuple. | ||||
''' | """ | ||||
if mayhavepending(root): | if mayhavepending(root): | ||||
try: | try: | ||||
return (vfs(b'%s.pending' % filename, **kwargs), True) | return (vfs(b'%s.pending' % filename, **kwargs), True) | ||||
except IOError as inst: | except IOError as inst: | ||||
if inst.errno != errno.ENOENT: | if inst.errno != errno.ENOENT: | ||||
raise | raise | ||||
return (vfs(filename, **kwargs), False) | return (vfs(filename, **kwargs), False) |
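A plain-file sketch of the same gate, with os.environ in place of Mercurial's encoding.environ and open() in place of the vfs:

    import errno
    import os

    def try_pending(root, filename):
        # prefer "<name>.pending" only when the environment says this
        # process may see the in-progress transaction for exactly this root
        if os.environ.get('HG_PENDING') == root:
            try:
                return open(filename + '.pending', 'rb'), True
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise    # a missing pending file is fine; other errors are not
        return open(filename, 'rb'), False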
def walkconfig(self, untrusted=False): | def walkconfig(self, untrusted=False): | ||||
cfg = self._data(untrusted) | cfg = self._data(untrusted) | ||||
for section in cfg.sections(): | for section in cfg.sections(): | ||||
for name, value in self.configitems(section, untrusted): | for name, value in self.configitems(section, untrusted): | ||||
yield section, name, value | yield section, name, value | ||||
def plain(self, feature=None): | def plain(self, feature=None): | ||||
'''is plain mode active? | """is plain mode active? | ||||
Plain mode means that all configuration variables which affect | Plain mode means that all configuration variables which affect | ||||
the behavior and output of Mercurial should be | the behavior and output of Mercurial should be | ||||
ignored. Additionally, the output should be stable, | ignored. Additionally, the output should be stable, | ||||
reproducible and suitable for use in scripts or applications. | reproducible and suitable for use in scripts or applications. | ||||
The only way to trigger plain mode is by setting either the | The only way to trigger plain mode is by setting either the | ||||
`HGPLAIN' or `HGPLAINEXCEPT' environment variables. | `HGPLAIN' or `HGPLAINEXCEPT' environment variables. | ||||
The return value can either be | The return value can either be | ||||
- False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT | - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT | ||||
- False if feature is disabled by default and not included in HGPLAIN | - False if feature is disabled by default and not included in HGPLAIN | ||||
- True otherwise | - True otherwise | ||||
''' | """ | ||||
if ( | if ( | ||||
b'HGPLAIN' not in encoding.environ | b'HGPLAIN' not in encoding.environ | ||||
and b'HGPLAINEXCEPT' not in encoding.environ | and b'HGPLAINEXCEPT' not in encoding.environ | ||||
): | ): | ||||
return False | return False | ||||
exceptions = ( | exceptions = ( | ||||
encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',') | encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',') | ||||
) | ) | ||||
return self._colormode is None | return self._colormode is None | ||||
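A simplified model of the HGPLAIN/HGPLAINEXCEPT resolution described in the docstring above; this ignores the feature-list form of HGPLAIN, so treat it as a sketch of the rule rather than the exact implementation:

    import os

    def plain_sketch(feature=None):
        # Plain mode is off unless one of the two variables is set at all.
        if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
            return False
        exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
        # A feature listed in HGPLAINEXCEPT keeps its normal behavior.
        if feature and exceptions != ['']:
            return feature not in exceptions
        return True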
def canbatchlabeledwrites(self): | def canbatchlabeledwrites(self): | ||||
'''check if write calls with labels are batchable''' | '''check if write calls with labels are batchable''' | ||||
# Windows color printing is special, see ``write``. | # Windows color printing is special, see ``write``. | ||||
return self._colormode != b'win32' | return self._colormode != b'win32' | ||||
def write(self, *args, **opts): | def write(self, *args, **opts): | ||||
'''write args to output | """write args to output | ||||
By default, this method simply writes to the buffer or stdout. | By default, this method simply writes to the buffer or stdout. | ||||
Color mode can be set on the UI class to have the output decorated | Color mode can be set on the UI class to have the output decorated | ||||
with color modifier before being written to stdout. | with color modifier before being written to stdout. | ||||
The color used is controlled by an optional keyword argument, "label". | The color used is controlled by an optional keyword argument, "label". | ||||
This should be a string containing label names separated by space. | This should be a string containing label names separated by space. | ||||
Label names take the form of "topic.type". For example, ui.debug() | Label names take the form of "topic.type". For example, ui.debug() | ||||
issues a label of "ui.debug". | issues a label of "ui.debug". | ||||
Progress reports via stderr are normally cleared before writing as | Progress reports via stderr are normally cleared before writing as | ||||
stdout and stderr go to the same terminal. This can be skipped with | stdout and stderr go to the same terminal. This can be skipped with | ||||
the optional keyword argument "keepprogressbar". The progress bar | the optional keyword argument "keepprogressbar". The progress bar | ||||
will continue to occupy a partial line on stderr in that case. | will continue to occupy a partial line on stderr in that case. | ||||
This functionality is intended when Mercurial acts as data source | This functionality is intended when Mercurial acts as data source | ||||
in a pipe. | in a pipe. | ||||
When labeling output for a specific command, a label of | When labeling output for a specific command, a label of | ||||
"cmdname.type" is recommended. For example, status issues | "cmdname.type" is recommended. For example, status issues | ||||
a label of "status.modified" for modified files. | a label of "status.modified" for modified files. | ||||
''' | """ | ||||
dest = self._fout | dest = self._fout | ||||
# inlined _write() for speed | # inlined _write() for speed | ||||
if self._buffers: | if self._buffers: | ||||
label = opts.get('label', b'') | label = opts.get('label', b'') | ||||
if label and self._bufferapplylabels: | if label and self._bufferapplylabels: | ||||
self._buffers[-1].extend(self.label(a, label) for a in args) | self._buffers[-1].extend(self.label(a, label) for a in args) | ||||
else: | else: | ||||
return True | return True | ||||
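The buffering branch above is the interesting part of write(): while a capture buffer is active, output is appended to it instead of reaching stdout. A toy model of that dispatch (MiniUI and its method names are hypothetical, not the real ui class):

    import sys

    class MiniUI(object):
        def __init__(self):
            self._buffers = []            # stack of capture buffers

        def pushbuffer(self):
            self._buffers.append([])

        def popbuffer(self):
            return ''.join(self._buffers.pop())

        def write(self, *args):
            if self._buffers:
                self._buffers[-1].extend(args)   # capture instead of printing
            else:
                sys.stdout.write(''.join(args))

    ui = MiniUI()
    ui.pushbuffer()
    ui.write('M file.txt\n')
    assert ui.popbuffer() == 'M file.txt\n'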
@property | @property | ||||
def _exithandlers(self): | def _exithandlers(self): | ||||
return _reqexithandlers | return _reqexithandlers | ||||
def atexit(self, func, *args, **kwargs): | def atexit(self, func, *args, **kwargs): | ||||
'''register a function to run after dispatching a request | """register a function to run after dispatching a request | ||||
Handlers do not stay registered across request boundaries.''' | Handlers do not stay registered across request boundaries.""" | ||||
self._exithandlers.append((func, args, kwargs)) | self._exithandlers.append((func, args, kwargs)) | ||||
return func | return func | ||||
def interface(self, feature): | def interface(self, feature): | ||||
"""what interface to use for interactive console features? | """what interface to use for interactive console features? | ||||
The interface is controlled by the value of `ui.interface` but also by | The interface is controlled by the value of `ui.interface` but also by | ||||
the value of feature-specific configuration. For example: | the value of feature-specific configuration. For example: | ||||
ui.interface.histedit = text | ui.interface.histedit = text | ||||
Then histedit will use the text interface and chunkselector will use | Then histedit will use the text interface and chunkselector will use | ||||
the default curses interface (crecord at the moment). | the default curses interface (crecord at the moment). | ||||
""" | """ | ||||
alldefaults = frozenset([b"text", b"curses"]) | alldefaults = frozenset([b"text", b"curses"]) | ||||
featureinterfaces = { | featureinterfaces = { | ||||
b"chunkselector": [b"text", b"curses",], | b"chunkselector": [ | ||||
b"histedit": [b"text", b"curses",], | b"text", | ||||
b"curses", | |||||
], | |||||
b"histedit": [ | |||||
b"text", | |||||
b"curses", | |||||
], | |||||
} | } | ||||
# Feature-specific interface | # Feature-specific interface | ||||
if feature not in featureinterfaces.keys(): | if feature not in featureinterfaces.keys(): | ||||
# Programming error, not user error | # Programming error, not user error | ||||
raise ValueError(b"Unknown feature requested %s" % feature) | raise ValueError(b"Unknown feature requested %s" % feature) | ||||
availableinterfaces = frozenset(featureinterfaces[feature]) | availableinterfaces = frozenset(featureinterfaces[feature]) | ||||
self.warn( | self.warn( | ||||
_(b"invalid value for ui.interface.%s: %s (using %s)\n") | _(b"invalid value for ui.interface.%s: %s (using %s)\n") | ||||
% (feature, f, choseninterface) | % (feature, f, choseninterface) | ||||
) | ) | ||||
return choseninterface | return choseninterface | ||||
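For reference, the lookup described above boils down to: a per-feature key wins over the global ui.interface, and an unknown value falls back to text with the warning shown in the hunk. A sketch using a plain dict in place of the real config object (hypothetical helper):

    def interface_sketch(config, feature):
        featureinterfaces = {
            'chunkselector': ['text', 'curses'],
            'histedit': ['text', 'curses'],
        }
        if feature not in featureinterfaces:
            raise ValueError('Unknown feature requested %s' % feature)
        available = frozenset(featureinterfaces[feature])
        # Per-feature setting beats the global ui.interface default.
        choice = (config.get('ui.interface.%s' % feature)
                  or config.get('ui.interface')
                  or 'text')
        if choice not in available:
            return 'text'   # the real code also emits the warning above
        return choice

    assert interface_sketch({'ui.interface.histedit': 'text'}, 'histedit') == 'text'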
def interactive(self): | def interactive(self): | ||||
'''is interactive input allowed? | """is interactive input allowed? | ||||
An interactive session is a session where input can be reasonably read | An interactive session is a session where input can be reasonably read | ||||
from `sys.stdin'. If this function returns false, any attempt to read | from `sys.stdin'. If this function returns false, any attempt to read | ||||
from stdin should fail with an error, unless a sensible default has been | from stdin should fail with an error, unless a sensible default has been | ||||
specified. | specified. | ||||
Interactiveness is triggered by the value of the `ui.interactive' | Interactiveness is triggered by the value of the `ui.interactive' | ||||
configuration variable or - if it is unset - when `sys.stdin' points | configuration variable or - if it is unset - when `sys.stdin' points | ||||
to a terminal device. | to a terminal device. | ||||
This function refers to input only; for output, see `ui.formatted()'. | This function refers to input only; for output, see `ui.formatted()'. | ||||
''' | """ | ||||
i = self.configbool(b"ui", b"interactive") | i = self.configbool(b"ui", b"interactive") | ||||
if i is None: | if i is None: | ||||
# some environments replace stdin without implementing isatty | # some environments replace stdin without implementing isatty | ||||
# usually those are non-interactive | # usually those are non-interactive | ||||
return self._isatty(self._fin) | return self._isatty(self._fin) | ||||
return i | return i | ||||
def termwidth(self): | def termwidth(self): | ||||
'''how wide is the terminal in columns? | """how wide is the terminal in columns?""" | ||||
''' | |||||
if b'COLUMNS' in encoding.environ: | if b'COLUMNS' in encoding.environ: | ||||
try: | try: | ||||
return int(encoding.environ[b'COLUMNS']) | return int(encoding.environ[b'COLUMNS']) | ||||
except ValueError: | except ValueError: | ||||
pass | pass | ||||
return scmutil.termsize(self)[0] | return scmutil.termsize(self)[0] | ||||
def formatted(self): | def formatted(self): | ||||
'''should formatted output be used? | """should formatted output be used? | ||||
It is often desirable to format the output to suit the output medium. | It is often desirable to format the output to suit the output medium. | ||||
Examples of this are truncating long lines or colorizing messages. | Examples of this are truncating long lines or colorizing messages. | ||||
However, this is often not desirable when piping output into other | However, this is often not desirable when piping output into other | ||||
utilities, e.g. `grep'. | utilities, e.g. `grep'. | ||||
Formatted output is triggered by the value of the `ui.formatted' | Formatted output is triggered by the value of the `ui.formatted' | ||||
configuration variable or - if it is unset - when `sys.stdout' points | configuration variable or - if it is unset - when `sys.stdout' points | ||||
to a terminal device. Please note that `ui.formatted' should be | to a terminal device. Please note that `ui.formatted' should be | ||||
considered an implementation detail; it is not intended for use outside | considered an implementation detail; it is not intended for use outside | ||||
Mercurial or its extensions. | Mercurial or its extensions. | ||||
This function refers to output only; for input, see `ui.interactive()'. | This function refers to output only; for input, see `ui.interactive()'. | ||||
This function always returns false when in plain mode, see `ui.plain()'. | This function always returns false when in plain mode, see `ui.plain()'. | ||||
''' | """ | ||||
if self.plain(): | if self.plain(): | ||||
return False | return False | ||||
i = self.configbool(b"ui", b"formatted") | i = self.configbool(b"ui", b"formatted") | ||||
if i is None: | if i is None: | ||||
# some environments replace stdout without implementing isatty | # some environments replace stdout without implementing isatty | ||||
# usually those are non-interactive | # usually those are non-interactive | ||||
return self._isatty(self._fout) | return self._isatty(self._fout) | ||||
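interactive() and formatted() share the same fallback: when the config knob is unset, probe the stream with isatty(), guarding against replaced streams that do not implement it. A standalone sketch of that probe (function name is illustrative):

    import os
    import sys

    def isatty_fallback(configured=None, stream=sys.stdout):
        if configured is not None:
            return configured
        try:
            # Some environments replace stdout/stdin without implementing
            # fileno()/isatty(); treat those as non-interactive.
            return os.isatty(stream.fileno())
        except (AttributeError, OSError, ValueError):
            return False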
raise EOFError | raise EOFError | ||||
return l.rstrip(b'\n') | return l.rstrip(b'\n') | ||||
else: | else: | ||||
return encoding.strtolocal(getpass.getpass('')) | return encoding.strtolocal(getpass.getpass('')) | ||||
except EOFError: | except EOFError: | ||||
raise error.ResponseExpected() | raise error.ResponseExpected() | ||||
def status(self, *msg, **opts): | def status(self, *msg, **opts): | ||||
'''write status message to output (if ui.quiet is False) | """write status message to output (if ui.quiet is False) | ||||
This adds an output label of "ui.status". | This adds an output label of "ui.status". | ||||
''' | """ | ||||
if not self.quiet: | if not self.quiet: | ||||
self._writemsg(self._fmsgout, type=b'status', *msg, **opts) | self._writemsg(self._fmsgout, type=b'status', *msg, **opts) | ||||
def warn(self, *msg, **opts): | def warn(self, *msg, **opts): | ||||
'''write warning message to output (stderr) | """write warning message to output (stderr) | ||||
This adds an output label of "ui.warning". | This adds an output label of "ui.warning". | ||||
''' | """ | ||||
self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts) | self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts) | ||||
def error(self, *msg, **opts): | def error(self, *msg, **opts): | ||||
'''write error message to output (stderr) | """write error message to output (stderr) | ||||
This adds an output label of "ui.error". | This adds an output label of "ui.error". | ||||
''' | """ | ||||
self._writemsg(self._fmsgerr, type=b'error', *msg, **opts) | self._writemsg(self._fmsgerr, type=b'error', *msg, **opts) | ||||
def note(self, *msg, **opts): | def note(self, *msg, **opts): | ||||
'''write note to output (if ui.verbose is True) | """write note to output (if ui.verbose is True) | ||||
This adds an output label of "ui.note". | This adds an output label of "ui.note". | ||||
''' | """ | ||||
if self.verbose: | if self.verbose: | ||||
self._writemsg(self._fmsgout, type=b'note', *msg, **opts) | self._writemsg(self._fmsgout, type=b'note', *msg, **opts) | ||||
def debug(self, *msg, **opts): | def debug(self, *msg, **opts): | ||||
'''write debug message to output (if ui.debugflag is True) | """write debug message to output (if ui.debugflag is True) | ||||
This adds an output label of "ui.debug". | This adds an output label of "ui.debug". | ||||
''' | """ | ||||
if self.debugflag: | if self.debugflag: | ||||
self._writemsg(self._fmsgout, type=b'debug', *msg, **opts) | self._writemsg(self._fmsgout, type=b'debug', *msg, **opts) | ||||
self.log(b'debug', b'%s', b''.join(msg)) | self.log(b'debug', b'%s', b''.join(msg)) | ||||
# Aliases to defeat check-code. | # Aliases to defeat check-code. | ||||
statusnoi18n = status | statusnoi18n = status | ||||
notenoi18n = note | notenoi18n = note | ||||
warnnoi18n = warn | warnnoi18n = warn | ||||
self, | self, | ||||
cmd, | cmd, | ||||
environ=None, | environ=None, | ||||
cwd=None, | cwd=None, | ||||
onerr=None, | onerr=None, | ||||
errprefix=None, | errprefix=None, | ||||
blockedtag=None, | blockedtag=None, | ||||
): | ): | ||||
'''execute shell command with appropriate output stream. command | """execute shell command with appropriate output stream. command | ||||
output will be redirected if fout is not stdout. | output will be redirected if fout is not stdout. | ||||
if command fails and onerr is None, return status, else raise onerr | if command fails and onerr is None, return status, else raise onerr | ||||
object as exception. | object as exception. | ||||
''' | """ | ||||
if blockedtag is None: | if blockedtag is None: | ||||
# Long cmds tend to be because of an absolute path on cmd. Keep | # Long cmds tend to be because of an absolute path on cmd. Keep | ||||
# the tail end instead | # the tail end instead | ||||
cmdsuffix = cmd.translate(None, _keepalnum)[-85:] | cmdsuffix = cmd.translate(None, _keepalnum)[-85:] | ||||
blockedtag = b'unknown_system_' + cmdsuffix | blockedtag = b'unknown_system_' + cmdsuffix | ||||
out = self._fout | out = self._fout | ||||
if any(s[1] for s in self._bufferstates): | if any(s[1] for s in self._bufferstates): | ||||
out = self | out = self | ||||
return rc | return rc | ||||
def _runsystem(self, cmd, environ, cwd, out): | def _runsystem(self, cmd, environ, cwd, out): | ||||
"""actually execute the given shell command (can be overridden by | """actually execute the given shell command (can be overridden by | ||||
extensions like chg)""" | extensions like chg)""" | ||||
return procutil.system(cmd, environ=environ, cwd=cwd, out=out) | return procutil.system(cmd, environ=environ, cwd=cwd, out=out) | ||||
def traceback(self, exc=None, force=False): | def traceback(self, exc=None, force=False): | ||||
'''print exception traceback if traceback printing enabled or forced. | """print exception traceback if traceback printing enabled or forced. | ||||
only to be called in an exception handler. returns true if a traceback | only to be called in an exception handler. returns true if a traceback | ||||
was printed.''' | was printed.""" | ||||
if self.tracebackflag or force: | if self.tracebackflag or force: | ||||
if exc is None: | if exc is None: | ||||
exc = sys.exc_info() | exc = sys.exc_info() | ||||
cause = getattr(exc[1], 'cause', None) | cause = getattr(exc[1], 'cause', None) | ||||
if cause is not None: | if cause is not None: | ||||
causetb = traceback.format_tb(cause[2]) | causetb = traceback.format_tb(cause[2]) | ||||
exctb = traceback.format_tb(exc[2]) | exctb = traceback.format_tb(exc[2]) | ||||
"""Install logger which can be identified later by the given name | """Install logger which can be identified later by the given name | ||||
More than one logger can be registered. Use extension or module | More than one logger can be registered. Use extension or module | ||||
name to uniquely identify the logger instance. | name to uniquely identify the logger instance. | ||||
""" | """ | ||||
self._loggers[name] = logger | self._loggers[name] = logger | ||||
def log(self, event, msgfmt, *msgargs, **opts): | def log(self, event, msgfmt, *msgargs, **opts): | ||||
'''hook for logging facility extensions | """hook for logging facility extensions | ||||
event should be a readily-identifiable subsystem, which will | event should be a readily-identifiable subsystem, which will | ||||
allow filtering. | allow filtering. | ||||
msgfmt should be a newline-terminated format string to log, and | msgfmt should be a newline-terminated format string to log, and | ||||
*msgargs are %-formatted into it. | *msgargs are %-formatted into it. | ||||
**opts currently has no defined meanings. | **opts currently has no defined meanings. | ||||
''' | """ | ||||
if not self._loggers: | if not self._loggers: | ||||
return | return | ||||
activeloggers = [ | activeloggers = [ | ||||
l for l in pycompat.itervalues(self._loggers) if l.tracked(event) | l for l in pycompat.itervalues(self._loggers) if l.tracked(event) | ||||
] | ] | ||||
if not activeloggers: | if not activeloggers: | ||||
return | return | ||||
msg = msgfmt % msgargs | msg = msgfmt % msgargs | ||||
opts = pycompat.byteskwargs(opts) | opts = pycompat.byteskwargs(opts) | ||||
# guard against recursion from e.g. ui.debug() | # guard against recursion from e.g. ui.debug() | ||||
registeredloggers = self._loggers | registeredloggers = self._loggers | ||||
self._loggers = {} | self._loggers = {} | ||||
try: | try: | ||||
for logger in activeloggers: | for logger in activeloggers: | ||||
logger.log(self, event, msg, opts) | logger.log(self, event, msg, opts) | ||||
finally: | finally: | ||||
self._loggers = registeredloggers | self._loggers = registeredloggers | ||||
def label(self, msg, label): | def label(self, msg, label): | ||||
'''style msg based on supplied label | """style msg based on supplied label | ||||
If some color mode is enabled, this will add the necessary control | If some color mode is enabled, this will add the necessary control | ||||
characters to apply such color. In addition, 'debug' color mode adds | characters to apply such color. In addition, 'debug' color mode adds | ||||
markup showing which label affects a piece of text. | markup showing which label affects a piece of text. | ||||
ui.write(s, 'label') is equivalent to | ui.write(s, 'label') is equivalent to | ||||
ui.write(ui.label(s, 'label')). | ui.write(ui.label(s, 'label')). | ||||
''' | """ | ||||
if self._colormode is not None: | if self._colormode is not None: | ||||
return color.colorlabel(self, msg, label) | return color.colorlabel(self, msg, label) | ||||
return msg | return msg | ||||
def develwarn(self, msg, stacklevel=1, config=None): | def develwarn(self, msg, stacklevel=1, config=None): | ||||
"""issue a developer warning message | """issue a developer warning message | ||||
Use 'stacklevel' to report the offender some layers further up in the | Use 'stacklevel' to report the offender some layers further up in the |
httplib = util.httplib | httplib = util.httplib | ||||
stringio = util.stringio | stringio = util.stringio | ||||
urlerr = util.urlerr | urlerr = util.urlerr | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
def escape(s, quote=None): | def escape(s, quote=None): | ||||
'''Replace special characters "&", "<" and ">" to HTML-safe sequences. | """Replace special characters "&", "<" and ">" to HTML-safe sequences. | ||||
If the optional flag quote is true, the quotation mark character (") | If the optional flag quote is true, the quotation mark character (") | ||||
is also translated. | is also translated. | ||||
This is the same as cgi.escape in Python, but always operates on | This is the same as cgi.escape in Python, but always operates on | ||||
bytes, whereas cgi.escape in Python 3 only works on unicodes. | bytes, whereas cgi.escape in Python 3 only works on unicodes. | ||||
''' | """ | ||||
s = s.replace(b"&", b"&amp;") | s = s.replace(b"&", b"&amp;") | ||||
s = s.replace(b"<", b"&lt;") | s = s.replace(b"<", b"&lt;") | ||||
s = s.replace(b">", b"&gt;") | s = s.replace(b">", b"&gt;") | ||||
if quote: | if quote: | ||||
s = s.replace(b'"', b"&quot;") | s = s.replace(b'"', b"&quot;") | ||||
return s | return s | ||||
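A self-contained check of escape()'s behavior; the compact redefinition below mirrors the hunk above so the assertion can run on its own, with quote=True translating the quotation mark as the docstring says:

    def escape_sketch(s, quote=None):
        s = s.replace(b"&", b"&amp;").replace(b"<", b"&lt;").replace(b">", b"&gt;")
        if quote:
            s = s.replace(b'"', b"&quot;")
        return s

    assert (escape_sketch(b'<a href="x">&</a>', quote=True)
            == b'&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;')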
ui, | ui, | ||||
authinfo=None, | authinfo=None, | ||||
useragent=None, | useragent=None, | ||||
loggingfh=None, | loggingfh=None, | ||||
loggingname=b's', | loggingname=b's', | ||||
loggingopts=None, | loggingopts=None, | ||||
sendaccept=True, | sendaccept=True, | ||||
): | ): | ||||
''' | """ | ||||
construct an opener suitable for urllib2 | construct an opener suitable for urllib2 | ||||
authinfo will be added to the password manager | authinfo will be added to the password manager | ||||
The opener can be configured to log socket events if the various | The opener can be configured to log socket events if the various | ||||
``logging*`` arguments are specified. | ``logging*`` arguments are specified. | ||||
``loggingfh`` denotes a file object to log events to. | ``loggingfh`` denotes a file object to log events to. | ||||
``loggingname`` denotes the name to print when logging. | ``loggingname`` denotes the name to print when logging. | ||||
``loggingopts`` is a dict of keyword arguments to pass to the constructed | ``loggingopts`` is a dict of keyword arguments to pass to the constructed | ||||
``util.socketobserver`` instance. | ``util.socketobserver`` instance. | ||||
``sendaccept`` allows controlling whether the ``Accept`` request header | ``sendaccept`` allows controlling whether the ``Accept`` request header | ||||
is sent. The header is sent by default. | is sent. The header is sent by default. | ||||
''' | """ | ||||
timeout = ui.configwith(float, b'http', b'timeout') | timeout = ui.configwith(float, b'http', b'timeout') | ||||
handlers = [] | handlers = [] | ||||
if loggingfh: | if loggingfh: | ||||
handlers.append( | handlers.append( | ||||
logginghttphandler( | logginghttphandler( | ||||
loggingfh, loggingname, loggingopts or {}, timeout=timeout | loggingfh, loggingname, loggingopts or {}, timeout=timeout | ||||
) | ) |
b"ProxyHandler", | b"ProxyHandler", | ||||
b"Request", | b"Request", | ||||
b"url2pathname", | b"url2pathname", | ||||
b"urlopen", | b"urlopen", | ||||
), | ), | ||||
) | ) | ||||
import urllib.response | import urllib.response | ||||
urlreq._registeraliases(urllib.response, (b"addclosehook", b"addinfourl",)) | urlreq._registeraliases( | ||||
urllib.response, | |||||
( | |||||
b"addclosehook", | |||||
b"addinfourl", | |||||
), | |||||
) | |||||
import urllib.error | import urllib.error | ||||
urlerr._registeraliases(urllib.error, (b"HTTPError", b"URLError",)) | urlerr._registeraliases( | ||||
urllib.error, | |||||
( | |||||
b"HTTPError", | |||||
b"URLError", | |||||
), | |||||
) | |||||
import http.server | import http.server | ||||
httpserver._registeraliases( | httpserver._registeraliases( | ||||
http.server, | http.server, | ||||
( | ( | ||||
b"HTTPServer", | b"HTTPServer", | ||||
b"BaseHTTPRequestHandler", | b"BaseHTTPRequestHandler", | ||||
b"SimpleHTTPRequestHandler", | b"SimpleHTTPRequestHandler", | ||||
b"HTTPPasswordMgrWithDefaultRealm", | b"HTTPPasswordMgrWithDefaultRealm", | ||||
b"HTTPSHandler", | b"HTTPSHandler", | ||||
b"install_opener", | b"install_opener", | ||||
b"ProxyHandler", | b"ProxyHandler", | ||||
b"Request", | b"Request", | ||||
b"urlopen", | b"urlopen", | ||||
), | ), | ||||
) | ) | ||||
urlreq._registeraliases(urlparse, (b"urlparse", b"urlunparse",)) | urlreq._registeraliases( | ||||
urlparse, | |||||
( | |||||
b"urlparse", | |||||
b"urlunparse", | |||||
), | |||||
) | |||||
urlreq._registeralias(urlparse, b"parse_qs", b"parseqs") | urlreq._registeralias(urlparse, b"parse_qs", b"parseqs") | ||||
urlreq._registeralias(urlparse, b"parse_qsl", b"parseqsl") | urlreq._registeralias(urlparse, b"parse_qsl", b"parseqsl") | ||||
urlerr._registeraliases(urllib2, (b"HTTPError", b"URLError",)) | urlerr._registeraliases( | ||||
urllib2, | |||||
( | |||||
b"HTTPError", | |||||
b"URLError", | |||||
), | |||||
) | |||||
httpserver._registeraliases( | httpserver._registeraliases( | ||||
BaseHTTPServer, (b"HTTPServer", b"BaseHTTPRequestHandler",) | BaseHTTPServer, | ||||
( | |||||
b"HTTPServer", | |||||
b"BaseHTTPRequestHandler", | |||||
), | |||||
) | ) | ||||
httpserver._registeraliases( | httpserver._registeraliases( | ||||
SimpleHTTPServer, (b"SimpleHTTPRequestHandler",) | SimpleHTTPServer, (b"SimpleHTTPRequestHandler",) | ||||
) | ) | ||||
httpserver._registeraliases(CGIHTTPServer, (b"CGIHTTPRequestHandler",)) | httpserver._registeraliases(CGIHTTPServer, (b"CGIHTTPRequestHandler",)) | ||||
def gethost(req): | def gethost(req): | ||||
return req.get_host() | return req.get_host() |
def copy(self): | def copy(self): | ||||
"""always do a cheap copy""" | """always do a cheap copy""" | ||||
self._copied = getattr(self, '_copied', 0) + 1 | self._copied = getattr(self, '_copied', 0) + 1 | ||||
return self | return self | ||||
class sortdict(collections.OrderedDict): | class sortdict(collections.OrderedDict): | ||||
'''a simple sorted dictionary | """a simple sorted dictionary | ||||
>>> d1 = sortdict([(b'a', 0), (b'b', 1)]) | >>> d1 = sortdict([(b'a', 0), (b'b', 1)]) | ||||
>>> d2 = d1.copy() | >>> d2 = d1.copy() | ||||
>>> d2 | >>> d2 | ||||
sortdict([('a', 0), ('b', 1)]) | sortdict([('a', 0), ('b', 1)]) | ||||
>>> d2.update([(b'a', 2)]) | >>> d2.update([(b'a', 2)]) | ||||
>>> list(d2.keys()) # should still be in last-set order | >>> list(d2.keys()) # should still be in last-set order | ||||
['b', 'a'] | ['b', 'a'] | ||||
>>> d1.insert(1, b'a.5', 0.5) | >>> d1.insert(1, b'a.5', 0.5) | ||||
>>> d1 | >>> d1 | ||||
sortdict([('a', 0), ('a.5', 0.5), ('b', 1)]) | sortdict([('a', 0), ('a.5', 0.5), ('b', 1)]) | ||||
''' | """ | ||||
def __setitem__(self, key, value): | def __setitem__(self, key, value): | ||||
if key in self: | if key in self: | ||||
del self[key] | del self[key] | ||||
super(sortdict, self).__setitem__(key, value) | super(sortdict, self).__setitem__(key, value) | ||||
if pycompat.ispypy: | if pycompat.ispypy: | ||||
# __setitem__() isn't called as of PyPy 5.8.0 | # __setitem__() isn't called as of PyPy 5.8.0 | ||||
def clearcachedproperty(obj, prop): | def clearcachedproperty(obj, prop): | ||||
'''clear a cached property value, if one has been set''' | '''clear a cached property value, if one has been set''' | ||||
prop = pycompat.sysstr(prop) | prop = pycompat.sysstr(prop) | ||||
if prop in obj.__dict__: | if prop in obj.__dict__: | ||||
del obj.__dict__[prop] | del obj.__dict__[prop] | ||||
def increasingchunks(source, min=1024, max=65536): | def increasingchunks(source, min=1024, max=65536): | ||||
'''return no less than min bytes per chunk while data remains, | """return no less than min bytes per chunk while data remains, | ||||
doubling min after each chunk until it reaches max''' | doubling min after each chunk until it reaches max""" | ||||
def log2(x): | def log2(x): | ||||
if not x: | if not x: | ||||
return 0 | return 0 | ||||
i = 0 | i = 0 | ||||
while x: | while x: | ||||
x >>= 1 | x >>= 1 | ||||
i += 1 | i += 1 | ||||
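The docstring above fully specifies the contract; a simplified standalone version (the real implementation rounds chunk sizes with the log2 helper, omitted here) looks like:

    def increasingchunks_sketch(source, min=1024, max=65536):
        # Yield at least 'min' bytes per chunk while data remains, doubling
        # 'min' after each emitted chunk until it reaches 'max'.
        buf, blen = [], 0
        for chunk in source:
            buf.append(chunk)
            blen += len(chunk)
            if blen >= min:
                yield b''.join(buf)
                buf, blen = [], 0
                min = min * 2
                if min > max:
                    min = max
        if buf:
            yield b''.join(buf)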
if pycompat.ispypy: | if pycompat.ispypy: | ||||
# PyPy runs slower with gc disabled | # PyPy runs slower with gc disabled | ||||
nogc = lambda x: x | nogc = lambda x: x | ||||
def pathto(root, n1, n2): | def pathto(root, n1, n2): | ||||
'''return the relative path from one place to another. | """return the relative path from one place to another. | ||||
root should use os.sep to separate directories | root should use os.sep to separate directories | ||||
n1 should use os.sep to separate directories | n1 should use os.sep to separate directories | ||||
n2 should use "/" to separate directories | n2 should use "/" to separate directories | ||||
returns an os.sep-separated path. | returns an os.sep-separated path. | ||||
If n1 is a relative path, it's assumed it's | If n1 is a relative path, it's assumed it's | ||||
relative to root. | relative to root. | ||||
n2 should always be relative to root. | n2 should always be relative to root. | ||||
''' | """ | ||||
if not n1: | if not n1: | ||||
return localpath(n2) | return localpath(n2) | ||||
if os.path.isabs(n1): | if os.path.isabs(n1): | ||||
if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]: | if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]: | ||||
return os.path.join(root, localpath(n2)) | return os.path.join(root, localpath(n2)) | ||||
n2 = b'/'.join((pconvert(root), n2)) | n2 = b'/'.join((pconvert(root), n2)) | ||||
a, b = splitpath(n1), n2.split(b'/') | a, b = splitpath(n1), n2.split(b'/') | ||||
a.reverse() | a.reverse() | ||||
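The question pathto() answers is: starting from file n1 under root, what relative path reaches n2? Ignoring the drive-letter and separator handling, a POSIX-only approximation via the standard library (hypothetical helper name):

    import posixpath

    def pathto_sketch(root, n1, n2):
        start = posixpath.dirname(posixpath.join(root, n1))
        return posixpath.relpath(posixpath.join(root, n2), start)

    assert pathto_sketch('/repo', 'a/b/x.txt', 'c/y.txt') == '../../c/y.txt'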
b'tmpfs', | b'tmpfs', | ||||
b'ufs', | b'ufs', | ||||
b'xfs', | b'xfs', | ||||
b'zfs', | b'zfs', | ||||
} | } | ||||
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False): | def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False): | ||||
'''copy a file, preserving mode and optionally other stat info like | """copy a file, preserving mode and optionally other stat info like | ||||
atime/mtime | atime/mtime | ||||
checkambig argument is used with filestat, and is useful only if | checkambig argument is used with filestat, and is useful only if | ||||
destination file is guarded by any lock (e.g. repo.lock or | destination file is guarded by any lock (e.g. repo.lock or | ||||
repo.wlock). | repo.wlock). | ||||
copystat and checkambig should be exclusive. | copystat and checkambig should be exclusive. | ||||
''' | """ | ||||
assert not (copystat and checkambig) | assert not (copystat and checkambig) | ||||
oldstat = None | oldstat = None | ||||
if os.path.lexists(dest): | if os.path.lexists(dest): | ||||
if checkambig: | if checkambig: | ||||
oldstat = checkambig and filestat.frompath(dest) | oldstat = checkambig and filestat.frompath(dest) | ||||
unlink(dest) | unlink(dest) | ||||
if hardlink: | if hardlink: | ||||
# Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks | # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks | ||||
b'lpt7', | b'lpt7', | ||||
b'lpt8', | b'lpt8', | ||||
b'lpt9', | b'lpt9', | ||||
} | } | ||||
_winreservedchars = b':*?"<>|' | _winreservedchars = b':*?"<>|' | ||||
def checkwinfilename(path): | def checkwinfilename(path): | ||||
r'''Check that the base-relative path is a valid filename on Windows. | r"""Check that the base-relative path is a valid filename on Windows. | ||||
Returns None if the path is ok, or a UI string describing the problem. | Returns None if the path is ok, or a UI string describing the problem. | ||||
>>> checkwinfilename(b"just/a/normal/path") | >>> checkwinfilename(b"just/a/normal/path") | ||||
>>> checkwinfilename(b"foo/bar/con.xml") | >>> checkwinfilename(b"foo/bar/con.xml") | ||||
"filename contains 'con', which is reserved on Windows" | "filename contains 'con', which is reserved on Windows" | ||||
>>> checkwinfilename(b"foo/con.xml/bar") | >>> checkwinfilename(b"foo/con.xml/bar") | ||||
"filename contains 'con', which is reserved on Windows" | "filename contains 'con', which is reserved on Windows" | ||||
>>> checkwinfilename(b"foo/bar/xml.con") | >>> checkwinfilename(b"foo/bar/xml.con") | ||||
>>> checkwinfilename(b"foo/bar/AUX/bla.txt") | >>> checkwinfilename(b"foo/bar/AUX/bla.txt") | ||||
"filename contains 'AUX', which is reserved on Windows" | "filename contains 'AUX', which is reserved on Windows" | ||||
>>> checkwinfilename(b"foo/bar/bla:.txt") | >>> checkwinfilename(b"foo/bar/bla:.txt") | ||||
"filename contains ':', which is reserved on Windows" | "filename contains ':', which is reserved on Windows" | ||||
>>> checkwinfilename(b"foo/bar/b\07la.txt") | >>> checkwinfilename(b"foo/bar/b\07la.txt") | ||||
"filename contains '\\x07', which is invalid on Windows" | "filename contains '\\x07', which is invalid on Windows" | ||||
>>> checkwinfilename(b"foo/bar/bla ") | >>> checkwinfilename(b"foo/bar/bla ") | ||||
"filename ends with ' ', which is not allowed on Windows" | "filename ends with ' ', which is not allowed on Windows" | ||||
>>> checkwinfilename(b"../bar") | >>> checkwinfilename(b"../bar") | ||||
>>> checkwinfilename(b"foo\\") | >>> checkwinfilename(b"foo\\") | ||||
"filename ends with '\\', which is invalid on Windows" | "filename ends with '\\', which is invalid on Windows" | ||||
>>> checkwinfilename(b"foo\\/bar") | >>> checkwinfilename(b"foo\\/bar") | ||||
"directory name ends with '\\', which is invalid on Windows" | "directory name ends with '\\', which is invalid on Windows" | ||||
''' | """ | ||||
if path.endswith(b'\\'): | if path.endswith(b'\\'): | ||||
return _(b"filename ends with '\\', which is invalid on Windows") | return _(b"filename ends with '\\', which is invalid on Windows") | ||||
if b'\\/' in path: | if b'\\/' in path: | ||||
return _(b"directory name ends with '\\', which is invalid on Windows") | return _(b"directory name ends with '\\', which is invalid on Windows") | ||||
for n in path.replace(b'\\', b'/').split(b'/'): | for n in path.replace(b'\\', b'/').split(b'/'): | ||||
if not n: | if not n: | ||||
continue | continue | ||||
for c in _filenamebytestr(n): | for c in _filenamebytestr(n): | ||||
global _re2 | global _re2 | ||||
try: | try: | ||||
# check if match works, see issue3964 | # check if match works, see issue3964 | ||||
_re2 = bool(re2.match(r'\[([^\[]+)\]', b'[ui]')) | _re2 = bool(re2.match(r'\[([^\[]+)\]', b'[ui]')) | ||||
except ImportError: | except ImportError: | ||||
_re2 = False | _re2 = False | ||||
def compile(self, pat, flags=0): | def compile(self, pat, flags=0): | ||||
'''Compile a regular expression, using re2 if possible | """Compile a regular expression, using re2 if possible | ||||
For best performance, use only re2-compatible regexp features. The | For best performance, use only re2-compatible regexp features. The | ||||
only flags from the re module that are re2-compatible are | only flags from the re module that are re2-compatible are | ||||
IGNORECASE and MULTILINE.''' | IGNORECASE and MULTILINE.""" | ||||
if _re2 is None: | if _re2 is None: | ||||
self._checkre2() | self._checkre2() | ||||
if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0: | if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0: | ||||
if flags & remod.IGNORECASE: | if flags & remod.IGNORECASE: | ||||
pat = b'(?i)' + pat | pat = b'(?i)' + pat | ||||
if flags & remod.MULTILINE: | if flags & remod.MULTILINE: | ||||
pat = b'(?m)' + pat | pat = b'(?m)' + pat | ||||
try: | try: | ||||
return re2.compile(pat) | return re2.compile(pat) | ||||
except re2.error: | except re2.error: | ||||
pass | pass | ||||
return remod.compile(pat, flags) | return remod.compile(pat, flags) | ||||
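The fallback pattern here is reusable: prefer re2 when only re2-compatible flags are requested, translate IGNORECASE/MULTILINE into inline groups, and drop back to the stdlib on any parse error. A standalone sketch, assuming a re2 binding that exposes match/compile/error as the hunk does:

    import re

    try:
        import re2
        _have_re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
    except ImportError:
        _have_re2 = False

    def compile_sketch(pat, flags=0):
        if _have_re2 and not flags & ~(re.IGNORECASE | re.MULTILINE):
            if flags & re.IGNORECASE:
                pat = '(?i)' + pat
            if flags & re.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass   # pattern uses a feature re2 lacks; fall through
        return re.compile(pat, flags)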
@propertycache | @propertycache | ||||
def escape(self): | def escape(self): | ||||
'''Return the version of escape corresponding to self.compile. | """Return the version of escape corresponding to self.compile. | ||||
This is imperfect because whether re2 or re is used for a particular | This is imperfect because whether re2 or re is used for a particular | ||||
function depends on the flags, etc, but it's the best we can do. | function depends on the flags, etc, but it's the best we can do. | ||||
''' | """ | ||||
global _re2 | global _re2 | ||||
if _re2 is None: | if _re2 is None: | ||||
self._checkre2() | self._checkre2() | ||||
if _re2: | if _re2: | ||||
return re2.escape | return re2.escape | ||||
else: | else: | ||||
return remod.escape | return remod.escape | ||||
re = _re() | re = _re() | ||||
_fspathcache = {} | _fspathcache = {} | ||||
def fspath(name, root): | def fspath(name, root): | ||||
'''Get name in the case stored in the filesystem | """Get name in the case stored in the filesystem | ||||
The name should be relative to root, and be normcase-ed for efficiency. | The name should be relative to root, and be normcase-ed for efficiency. | ||||
Note that this function is unnecessary, and should not be | Note that this function is unnecessary, and should not be | ||||
called, for case-sensitive filesystems (simply because it's expensive). | called, for case-sensitive filesystems (simply because it's expensive). | ||||
The root should be normcase-ed, too. | The root should be normcase-ed, too. | ||||
''' | """ | ||||
def _makefspathcacheentry(dir): | def _makefspathcacheentry(dir): | ||||
return {normcase(n): n for n in os.listdir(dir)} | return {normcase(n): n for n in os.listdir(dir)} | ||||
seps = pycompat.ossep | seps = pycompat.ossep | ||||
if pycompat.osaltsep: | if pycompat.osaltsep: | ||||
seps = seps + pycompat.osaltsep | seps = seps + pycompat.osaltsep | ||||
# Protect backslashes. This gets silly very quickly. | # Protect backslashes. This gets silly very quickly. | ||||
return ( | return ( | ||||
path.endswith(pycompat.ossep) | path.endswith(pycompat.ossep) | ||||
or pycompat.osaltsep | or pycompat.osaltsep | ||||
and path.endswith(pycompat.osaltsep) | and path.endswith(pycompat.osaltsep) | ||||
) | ) | ||||
def splitpath(path): | def splitpath(path): | ||||
'''Split path by os.sep. | """Split path by os.sep. | ||||
Note that this function does not use os.altsep because it is | Note that this function does not use os.altsep because it is | ||||
meant as an alternative to a simple "xxx.split(os.sep)". | meant as an alternative to a simple "xxx.split(os.sep)". | ||||
It is recommended to use os.path.normpath() before using this | It is recommended to use os.path.normpath() before using this | ||||
function if needed.''' | function if needed.""" | ||||
return path.split(pycompat.ossep) | return path.split(pycompat.ossep) | ||||
def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False): | def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False): | ||||
"""Create a temporary file with the same contents from name | """Create a temporary file with the same contents from name | ||||
The permission bits are copied from the original file. | The permission bits are copied from the original file. | ||||
raise | raise | ||||
return True | return True | ||||
def __ne__(self, other): | def __ne__(self, other): | ||||
return not self == other | return not self == other | ||||
class atomictempfile(object): | class atomictempfile(object): | ||||
'''writable file object that atomically updates a file | """writable file object that atomically updates a file | ||||
All writes will go to a temporary copy of the original file. Call | All writes will go to a temporary copy of the original file. Call | ||||
close() when you are done writing, and atomictempfile will rename | close() when you are done writing, and atomictempfile will rename | ||||
the temporary copy to the original name, making the changes | the temporary copy to the original name, making the changes | ||||
visible. If the object is destroyed without being closed, all your | visible. If the object is destroyed without being closed, all your | ||||
writes are discarded. | writes are discarded. | ||||
checkambig argument of constructor is used with filestat, and is | checkambig argument of constructor is used with filestat, and is | ||||
useful only if target file is guarded by any lock (e.g. repo.lock | useful only if target file is guarded by any lock (e.g. repo.lock | ||||
or repo.wlock). | or repo.wlock). | ||||
''' | """ | ||||
def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False): | def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False): | ||||
self.__name = name # permanent name | self.__name = name # permanent name | ||||
self._tempname = mktempcopy( | self._tempname = mktempcopy( | ||||
name, | name, | ||||
emptyok=(b'w' in mode), | emptyok=(b'w' in mode), | ||||
createmode=createmode, | createmode=createmode, | ||||
enforcewritable=(b'w' in mode), | enforcewritable=(b'w' in mode), | ||||
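The pattern behind atomictempfile is write-to-sibling-then-rename, so readers never observe a half-written file. A minimal standalone sketch of that pattern (not the class above, which also handles modes and ambiguity checks):

    import os
    import tempfile

    def atomicwrite_sketch(name, data):
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(name) or '.')
        try:
            with os.fdopen(fd, 'wb') as fp:
                fp.write(data)
            os.replace(tmp, name)   # atomic rename over the target
        except Exception:
            os.unlink(tmp)          # discard the temp copy on any failure
            raise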
timing_stats.elapsed = timer() - timing_stats.start | timing_stats.elapsed = timer() - timing_stats.start | ||||
timedcm._nested -= 1 | timedcm._nested -= 1 | ||||
timedcm._nested = 0 | timedcm._nested = 0 | ||||
def timed(func): | def timed(func): | ||||
'''Report the execution time of a function call to stderr. | """Report the execution time of a function call to stderr. | ||||
During development, use as a decorator when you need to measure | During development, use as a decorator when you need to measure | ||||
the cost of a function, e.g. as follows: | the cost of a function, e.g. as follows: | ||||
@util.timed | @util.timed | ||||
def foo(a, b, c): | def foo(a, b, c): | ||||
pass | pass | ||||
''' | """ | ||||
def wrapper(*args, **kwargs): | def wrapper(*args, **kwargs): | ||||
with timedcm(pycompat.bytestr(func.__name__)) as time_stats: | with timedcm(pycompat.bytestr(func.__name__)) as time_stats: | ||||
result = func(*args, **kwargs) | result = func(*args, **kwargs) | ||||
stderr = procutil.stderr | stderr = procutil.stderr | ||||
stderr.write( | stderr.write( | ||||
b'%s%s: %s\n' | b'%s%s: %s\n' | ||||
% ( | % ( | ||||
(b'kb', 2 ** 10), | (b'kb', 2 ** 10), | ||||
(b'mb', 2 ** 20), | (b'mb', 2 ** 20), | ||||
(b'gb', 2 ** 30), | (b'gb', 2 ** 30), | ||||
(b'b', 1), | (b'b', 1), | ||||
) | ) | ||||
def sizetoint(s): | def sizetoint(s): | ||||
'''Convert a space specifier to a byte count. | """Convert a space specifier to a byte count. | ||||
>>> sizetoint(b'30') | >>> sizetoint(b'30') | ||||
30 | 30 | ||||
>>> sizetoint(b'2.2kb') | >>> sizetoint(b'2.2kb') | ||||
2252 | 2252 | ||||
>>> sizetoint(b'6M') | >>> sizetoint(b'6M') | ||||
6291456 | 6291456 | ||||
''' | """ | ||||
t = s.strip().lower() | t = s.strip().lower() | ||||
try: | try: | ||||
for k, u in _sizeunits: | for k, u in _sizeunits: | ||||
if t.endswith(k): | if t.endswith(k): | ||||
return int(float(t[: -len(k)]) * u) | return int(float(t[: -len(k)]) * u) | ||||
return int(t) | return int(t) | ||||
except ValueError: | except ValueError: | ||||
raise error.ParseError(_(b"couldn't parse size: %s") % s) | raise error.ParseError(_(b"couldn't parse size: %s") % s) | ||||
class hooks(object): | class hooks(object): | ||||
'''A collection of hook functions that can be used to extend a | """A collection of hook functions that can be used to extend a | ||||
function's behavior. Hooks are called in lexicographic order, | function's behavior. Hooks are called in lexicographic order, | ||||
based on the names of their sources.''' | based on the names of their sources.""" | ||||
def __init__(self): | def __init__(self): | ||||
self._hooks = [] | self._hooks = [] | ||||
def add(self, source, hook): | def add(self, source, hook): | ||||
self._hooks.append((source, hook)) | self._hooks.append((source, hook)) | ||||
def __call__(self, *args): | def __call__(self, *args): | ||||
self._hooks.sort(key=lambda x: x[0]) | self._hooks.sort(key=lambda x: x[0]) | ||||
results = [] | results = [] | ||||
for source, hook in self._hooks: | for source, hook in self._hooks: | ||||
results.append(hook(*args)) | results.append(hook(*args)) | ||||
return results | return results | ||||
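Since the class is small and fully visible above, a usage example makes the ordering concrete: source names, not registration order, decide call order (this assumes the hooks class above is in scope):

    hs = hooks()
    hs.add(b'zzz-ext', lambda x: x * 2)
    hs.add(b'aaa-ext', lambda x: x + 1)
    assert hs(10) == [11, 20]   # 'aaa-ext' sorts before 'zzz-ext'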
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0): | def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0): | ||||
'''Yields lines for a nicely formatted stacktrace. | """Yields lines for a nicely formatted stacktrace. | ||||
Skips the 'skip' last entries, then returns the last 'depth' entries. | Skips the 'skip' last entries, then returns the last 'depth' entries. | ||||
Each file+linenumber is formatted according to fileline. | Each file+linenumber is formatted according to fileline. | ||||
Each line is formatted according to line. | Each line is formatted according to line. | ||||
If line is None, it yields: | If line is None, it yields: | ||||
length of longest filepath+line number, | length of longest filepath+line number, | ||||
filepath+linenumber, | filepath+linenumber, | ||||
function | function | ||||
Not to be used in production code but very convenient while developing. | Not to be used in production code but very convenient while developing. | ||||
''' | """ | ||||
entries = [ | entries = [ | ||||
(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func)) | (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func)) | ||||
for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1] | for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1] | ||||
][-depth:] | ][-depth:] | ||||
if entries: | if entries: | ||||
fnmax = max(len(entry[0]) for entry in entries) | fnmax = max(len(entry[0]) for entry in entries) | ||||
for fnln, func in entries: | for fnln, func in entries: | ||||
if line is None: | if line is None: | ||||
yield (fnmax, fnln, func) | yield (fnmax, fnln, func) | ||||
else: | else: | ||||
yield line % (fnmax, fnln, func) | yield line % (fnmax, fnln, func) | ||||
def debugstacktrace( | def debugstacktrace( | ||||
msg=b'stacktrace', | msg=b'stacktrace', | ||||
skip=0, | skip=0, | ||||
f=procutil.stderr, | f=procutil.stderr, | ||||
otherf=procutil.stdout, | otherf=procutil.stdout, | ||||
depth=0, | depth=0, | ||||
prefix=b'', | prefix=b'', | ||||
): | ): | ||||
'''Writes a message to f (stderr) with a nicely formatted stacktrace. | """Writes a message to f (stderr) with a nicely formatted stacktrace. | ||||
Skips the 'skip' entries closest to the call, then shows 'depth' entries. | Skips the 'skip' entries closest to the call, then shows 'depth' entries. | ||||
By default it will flush stdout first. | By default it will flush stdout first. | ||||
It can be used everywhere and intentionally does not require a ui object. | It can be used everywhere and intentionally does not require a ui object. | ||||
Not to be used in production code but very convenient while developing. | Not to be used in production code but very convenient while developing. | ||||
''' | """ | ||||
if otherf: | if otherf: | ||||
otherf.flush() | otherf.flush() | ||||
f.write(b'%s%s at:\n' % (prefix, msg.rstrip())) | f.write(b'%s%s at:\n' % (prefix, msg.rstrip())) | ||||
for line in getstackframes(skip + 1, depth=depth): | for line in getstackframes(skip + 1, depth=depth): | ||||
f.write(prefix + line) | f.write(prefix + line) | ||||
f.flush() | f.flush() | ||||
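A dependency-free sketch of the same frame-trimming idea, for readers who want to poke at it outside Mercurial (the helper name is illustrative):

    import traceback

    def stackframes_sketch(skip=0, depth=0):
        # Newest frames last; drop the 'skip' innermost entries, then keep
        # only the last 'depth' (0 means keep everything).
        entries = [('%s:%d' % (fn, ln), func)
                   for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]]
        return entries[-depth:] if depth else entries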
# complex value to deal with. | # complex value to deal with. | ||||
if self._state == self._STATE_NONE: | if self._state == self._STATE_NONE: | ||||
# A normal value. | # A normal value. | ||||
if special == SPECIAL_NONE: | if special == SPECIAL_NONE: | ||||
self._decodedvalues.append(value) | self._decodedvalues.append(value) | ||||
elif special == SPECIAL_START_ARRAY: | elif special == SPECIAL_START_ARRAY: | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': [],} | { | ||||
b'remaining': value, | |||||
b'v': [], | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_ARRAY_VALUE | self._state = self._STATE_WANT_ARRAY_VALUE | ||||
elif special == SPECIAL_START_MAP: | elif special == SPECIAL_START_MAP: | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': {},} | { | ||||
b'remaining': value, | |||||
b'v': {}, | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_MAP_KEY | self._state = self._STATE_WANT_MAP_KEY | ||||
elif special == SPECIAL_START_SET: | elif special == SPECIAL_START_SET: | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': set(),} | { | ||||
b'remaining': value, | |||||
b'v': set(), | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_SET_VALUE | self._state = self._STATE_WANT_SET_VALUE | ||||
elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | ||||
self._state = self._STATE_WANT_BYTESTRING_CHUNK_FIRST | self._state = self._STATE_WANT_BYTESTRING_CHUNK_FIRST | ||||
else: | else: | ||||
raise CBORDecodeError( | raise CBORDecodeError( | ||||
elif special == SPECIAL_START_ARRAY: | elif special == SPECIAL_START_ARRAY: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = [] | newvalue = [] | ||||
lastc[b'v'].append(newvalue) | lastc[b'v'].append(newvalue) | ||||
lastc[b'remaining'] -= 1 | lastc[b'remaining'] -= 1 | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': newvalue,} | { | ||||
b'remaining': value, | |||||
b'v': newvalue, | |||||
} | |||||
) | ) | ||||
# self._state doesn't need to be changed. | # self._state doesn't need to be changed. | ||||
# A map nested within an array. | # A map nested within an array. | ||||
elif special == SPECIAL_START_MAP: | elif special == SPECIAL_START_MAP: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = {} | newvalue = {} | ||||
elif special == SPECIAL_START_SET: | elif special == SPECIAL_START_SET: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = set() | newvalue = set() | ||||
lastc[b'v'].append(newvalue) | lastc[b'v'].append(newvalue) | ||||
lastc[b'remaining'] -= 1 | lastc[b'remaining'] -= 1 | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': newvalue,} | { | ||||
b'remaining': value, | |||||
b'v': newvalue, | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_SET_VALUE | self._state = self._STATE_WANT_SET_VALUE | ||||
elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | ||||
raise CBORDecodeError( | raise CBORDecodeError( | ||||
b'indefinite length bytestrings ' | b'indefinite length bytestrings ' | ||||
b'not allowed as array values' | b'not allowed as array values' | ||||
elif special == SPECIAL_START_ARRAY: | elif special == SPECIAL_START_ARRAY: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = [] | newvalue = [] | ||||
lastc[b'v'][self._currentmapkey] = newvalue | lastc[b'v'][self._currentmapkey] = newvalue | ||||
lastc[b'remaining'] -= 1 | lastc[b'remaining'] -= 1 | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': newvalue,} | { | ||||
b'remaining': value, | |||||
b'v': newvalue, | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_ARRAY_VALUE | self._state = self._STATE_WANT_ARRAY_VALUE | ||||
# A new map is used as the map value. | # A new map is used as the map value. | ||||
elif special == SPECIAL_START_MAP: | elif special == SPECIAL_START_MAP: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = {} | newvalue = {} | ||||
lastc[b'v'][self._currentmapkey] = newvalue | lastc[b'v'][self._currentmapkey] = newvalue | ||||
lastc[b'remaining'] -= 1 | lastc[b'remaining'] -= 1 | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': newvalue,} | { | ||||
b'remaining': value, | |||||
b'v': newvalue, | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_MAP_KEY | self._state = self._STATE_WANT_MAP_KEY | ||||
# A new set is used as the map value. | # A new set is used as the map value. | ||||
elif special == SPECIAL_START_SET: | elif special == SPECIAL_START_SET: | ||||
lastc = self._collectionstack[-1] | lastc = self._collectionstack[-1] | ||||
newvalue = set() | newvalue = set() | ||||
lastc[b'v'][self._currentmapkey] = newvalue | lastc[b'v'][self._currentmapkey] = newvalue | ||||
lastc[b'remaining'] -= 1 | lastc[b'remaining'] -= 1 | ||||
self._collectionstack.append( | self._collectionstack.append( | ||||
{b'remaining': value, b'v': newvalue,} | { | ||||
b'remaining': value, | |||||
b'v': newvalue, | |||||
} | |||||
) | ) | ||||
self._state = self._STATE_WANT_SET_VALUE | self._state = self._STATE_WANT_SET_VALUE | ||||
elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | elif special == SPECIAL_START_INDEFINITE_BYTESTRING: | ||||
raise CBORDecodeError( | raise CBORDecodeError( | ||||
b'indefinite length bytestrings not ' | b'indefinite length bytestrings not ' | ||||
b'allowed as map values' | b'allowed as map values' |
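All of the repeated hunks above do the same bookkeeping: push a frame onto the collection stack whose value object is already linked into its parent, so the nested container fills in place and needs no post-processing. A condensed sketch (array parent only; a map parent indexes by the pending key instead of appending):

    def push_collection_sketch(stack, remaining, newvalue):
        frame = {'remaining': remaining, 'v': newvalue}
        if stack:
            parent = stack[-1]
            parent['v'].append(newvalue)   # child is visible in the parent now
            parent['remaining'] -= 1       # parent expects one fewer value
        stack.append(frame)
        return frame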
_ = i18n._ | _ = i18n._ | ||||
# compression code | # compression code | ||||
SERVERROLE = b'server' | SERVERROLE = b'server' | ||||
CLIENTROLE = b'client' | CLIENTROLE = b'client' | ||||
compewireprotosupport = collections.namedtuple( | compewireprotosupport = collections.namedtuple( | ||||
'compenginewireprotosupport', ('name', 'serverpriority', 'clientpriority'), | 'compenginewireprotosupport', | ||||
('name', 'serverpriority', 'clientpriority'), | |||||
) | ) | ||||
class propertycache(object): | class propertycache(object): | ||||
def __init__(self, func): | def __init__(self, func): | ||||
self.func = func | self.func = func | ||||
self.name = func.__name__ | self.name = func.__name__ | ||||
b'%b %d %Y', | b'%b %d %Y', | ||||
b'%b %d', | b'%b %d', | ||||
b'%H:%M:%S', | b'%H:%M:%S', | ||||
b'%I:%M:%S%p', | b'%I:%M:%S%p', | ||||
b'%H:%M', | b'%H:%M', | ||||
b'%I:%M%p', | b'%I:%M%p', | ||||
) | ) | ||||
extendeddateformats = defaultdateformats + (b"%Y", b"%Y-%m", b"%b", b"%b %Y",) | extendeddateformats = defaultdateformats + ( | ||||
b"%Y", | |||||
b"%Y-%m", | |||||
b"%b", | |||||
b"%b %Y", | |||||
) | |||||
def makedate(timestamp=None): | def makedate(timestamp=None): | ||||
'''Return a unix timestamp (or the current time) as a (unixtime, | """Return a unix timestamp (or the current time) as a (unixtime, | ||||
offset) tuple based on the local timezone.''' | offset) tuple based on the local timezone.""" | ||||
if timestamp is None: | if timestamp is None: | ||||
timestamp = time.time() | timestamp = time.time() | ||||
if timestamp < 0: | if timestamp < 0: | ||||
hint = _(b"check your clock") | hint = _(b"check your clock") | ||||
raise error.Abort(_(b"negative timestamp: %d") % timestamp, hint=hint) | raise error.Abort(_(b"negative timestamp: %d") % timestamp, hint=hint) | ||||
delta = datetime.datetime.utcfromtimestamp( | delta = datetime.datetime.utcfromtimestamp( | ||||
timestamp | timestamp | ||||
) - datetime.datetime.fromtimestamp(timestamp) | ) - datetime.datetime.fromtimestamp(timestamp) | ||||
def shortdate(date=None): | def shortdate(date=None): | ||||
"""turn (timestamp, tzoff) tuple into iso 8631 date.""" | """turn (timestamp, tzoff) tuple into iso 8631 date.""" | ||||
return datestr(date, format=b'%Y-%m-%d') | return datestr(date, format=b'%Y-%m-%d') | ||||
def parsetimezone(s): | def parsetimezone(s): | ||||
"""find a trailing timezone, if any, in string, and return a | """find a trailing timezone, if any, in string, and return a | ||||
(offset, remainder) pair""" | (offset, remainder) pair""" | ||||
s = pycompat.bytestr(s) | s = pycompat.bytestr(s) | ||||
if s.endswith(b"GMT") or s.endswith(b"UTC"): | if s.endswith(b"GMT") or s.endswith(b"UTC"): | ||||
return 0, s[:-3].rstrip() | return 0, s[:-3].rstrip() | ||||
# Unix-style timezones [+-]hhmm | # Unix-style timezones [+-]hhmm | ||||
if len(s) >= 5 and s[-5] in b"+-" and s[-4:].isdigit(): | if len(s) >= 5 and s[-5] in b"+-" and s[-4:].isdigit(): | ||||
sign = (s[-5] == b"+") and 1 or -1 | sign = (s[-5] == b"+") and 1 or -1 |
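A standalone version of the trailing-timezone scan, keeping the sign convention visible in the hunk above (a '+hhmm' suffix yields a negative offset, i.e. seconds stored west of UTC):

    def parsetimezone_sketch(s):
        if s.endswith('GMT') or s.endswith('UTC'):
            return 0, s[:-3].rstrip()
        # Unix-style [+-]hhmm suffix.
        if len(s) >= 5 and s[-5] in '+-' and s[-4:].isdigit():
            sign = 1 if s[-5] == '+' else -1
            hours, minutes = int(s[-4:-2]), int(s[-2:])
            return -sign * (hours * 3600 + minutes * 60), s[:-5].rstrip()
        return None, s

    assert parsetimezone_sketch('Mon Jan 1 12:00:00 2024 +0100')[0] == -3600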
stdin=subprocess.PIPE, | stdin=subprocess.PIPE, | ||||
stdout=subprocess.PIPE, | stdout=subprocess.PIPE, | ||||
) | ) | ||||
pout, perr = p.communicate(s) | pout, perr = p.communicate(s) | ||||
return pout | return pout | ||||
def tempfilter(s, cmd): | def tempfilter(s, cmd): | ||||
'''filter string S through a pair of temporary files with CMD. | """filter string S through a pair of temporary files with CMD. | ||||
CMD is used as a template to create the real command to be run, | CMD is used as a template to create the real command to be run, | ||||
with the strings INFILE and OUTFILE replaced by the real names of | with the strings INFILE and OUTFILE replaced by the real names of | ||||
the temporary files generated.''' | the temporary files generated.""" | ||||
inname, outname = None, None | inname, outname = None, None | ||||
try: | try: | ||||
infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-') | infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-') | ||||
fp = os.fdopen(infd, 'wb') | fp = os.fdopen(infd, 'wb') | ||||
fp.write(s) | fp.write(s) | ||||
fp.close() | fp.close() | ||||
outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-') | outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-') | ||||
os.close(outfd) | os.close(outfd) | ||||
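A usage sketch of the INFILE/OUTFILE template convention (hypothetical command; the part of the function that substitutes the names and runs the command is collapsed above):

# INFILE and OUTFILE are literal placeholders in the command template:
filtered = tempfilter(b'mixed Case text\n', b"tr '[a-z]' '[A-Z]' < INFILE > OUTFILE")
# -> b'MIXED CASE TEXT\n', assuming a POSIX shell with tr(1) available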
def shelltonative(cmd, env): | def shelltonative(cmd, env): | ||||
return cmd | return cmd | ||||
tonativestr = pycompat.identity | tonativestr = pycompat.identity | ||||
def tonativeenv(env): | def tonativeenv(env): | ||||
'''convert the environment from bytes to strings suitable for Popen(), etc. | """convert the environment from bytes to strings suitable for Popen(), etc.""" | ||||
''' | |||||
return pycompat.rapply(tonativestr, env) | return pycompat.rapply(tonativestr, env) | ||||
def system(cmd, environ=None, cwd=None, out=None): | def system(cmd, environ=None, cwd=None, out=None): | ||||
'''enhanced shell command execution. | """enhanced shell command execution. | ||||
run with environment maybe modified, maybe in different dir. | run with environment maybe modified, maybe in different dir. | ||||
if out is specified, it is assumed to be a file-like object that has a | if out is specified, it is assumed to be a file-like object that has a | ||||
write() method. stdout and stderr will be redirected to out.''' | write() method. stdout and stderr will be redirected to out.""" | ||||
try: | try: | ||||
stdout.flush() | stdout.flush() | ||||
except Exception: | except Exception: | ||||
pass | pass | ||||
env = shellenviron(environ) | env = shellenviron(environ) | ||||
if out is None or isstdout(out): | if out is None or isstdout(out): | ||||
rc = subprocess.call( | rc = subprocess.call( | ||||
tonativestr(cmd), | tonativestr(cmd), | ||||
env, | env, | ||||
shell=False, | shell=False, | ||||
stdout=None, | stdout=None, | ||||
stderr=None, | stderr=None, | ||||
ensurestart=True, | ensurestart=True, | ||||
record_wait=None, | record_wait=None, | ||||
stdin_bytes=None, | stdin_bytes=None, | ||||
): | ): | ||||
'''Spawn a command without waiting for it to finish. | """Spawn a command without waiting for it to finish. | ||||
When `record_wait` is not None, the spawned process will not be fully | When `record_wait` is not None, the spawned process will not be fully | ||||
detached and the `record_wait` argument will be called with the | detached and the `record_wait` argument will be called with the | ||||
`Subprocess.wait` function for the spawned process. This is mostly | `Subprocess.wait` function for the spawned process. This is mostly | ||||
useful for developers that need to make sure the spawned process | useful for developers that need to make sure the spawned process | ||||
finished before a certain point. (e.g. writing tests)''' | finished before a certain point. (e.g. writing tests)""" | ||||
if pycompat.isdarwin: | if pycompat.isdarwin: | ||||
# avoid crash in CoreFoundation in case another thread | # avoid crash in CoreFoundation in case another thread | ||||
# calls gui() while we're calling fork(). | # calls gui() while we're calling fork(). | ||||
gui() | gui() | ||||
# double-fork to completely detach from the parent process | # double-fork to completely detach from the parent process | ||||
# based on http://code.activestate.com/recipes/278731 | # based on http://code.activestate.com/recipes/278731 | ||||
if record_wait is None: | if record_wait is None: |
if f != -1: | if f != -1: | ||||
return author[:f].strip(b' "').replace(b'\\"', b'"') | return author[:f].strip(b' "').replace(b'\\"', b'"') | ||||
f = author.find(b'@') | f = author.find(b'@') | ||||
return author[:f].replace(b'.', b' ') | return author[:f].replace(b'.', b' ') | ||||
@attr.s(hash=True) | @attr.s(hash=True) | ||||
class mailmapping(object): | class mailmapping(object): | ||||
'''Represents a username/email key or value in | """Represents a username/email key or value in | ||||
a mailmap file''' | a mailmap file""" | ||||
email = attr.ib() | email = attr.ib() | ||||
name = attr.ib(default=None) | name = attr.ib(default=None) | ||||
def _ismailmaplineinvalid(names, emails): | def _ismailmaplineinvalid(names, emails): | ||||
'''Returns True if the parsed names and emails | """Returns True if the parsed names and emails | ||||
in a mailmap entry are invalid. | in a mailmap entry are invalid. | ||||
>>> # No names or emails fails | >>> # No names or emails fails | ||||
>>> names, emails = [], [] | >>> names, emails = [], [] | ||||
>>> _ismailmaplineinvalid(names, emails) | >>> _ismailmaplineinvalid(names, emails) | ||||
True | True | ||||
>>> # Only one email fails | >>> # Only one email fails | ||||
>>> emails = [b'email@email.com'] | >>> emails = [b'email@email.com'] | ||||
>>> _ismailmaplineinvalid(names, emails) | >>> _ismailmaplineinvalid(names, emails) | ||||
True | True | ||||
>>> # One email and one name passes | >>> # One email and one name passes | ||||
>>> names = [b'Test Name'] | >>> names = [b'Test Name'] | ||||
>>> _ismailmaplineinvalid(names, emails) | >>> _ismailmaplineinvalid(names, emails) | ||||
False | False | ||||
>>> # No names but two emails passes | >>> # No names but two emails passes | ||||
>>> names = [] | >>> names = [] | ||||
>>> emails = [b'proper@email.com', b'commit@email.com'] | >>> emails = [b'proper@email.com', b'commit@email.com'] | ||||
>>> _ismailmaplineinvalid(names, emails) | >>> _ismailmaplineinvalid(names, emails) | ||||
False | False | ||||
''' | """ | ||||
return not emails or not names and len(emails) < 2 | return not emails or not names and len(emails) < 2 | ||||
def parsemailmap(mailmapcontent): | def parsemailmap(mailmapcontent): | ||||
"""Parses data in the .mailmap format | """Parses data in the .mailmap format | ||||
>>> mmdata = b"\\n".join([ | >>> mmdata = b"\\n".join([ | ||||
... b'# Comment', | ... b'# Comment', | ||||
# Check to see if we have parsed the line into a valid form | # Check to see if we have parsed the line into a valid form | ||||
# We require at least one email, and either at least one | # We require at least one email, and either at least one | ||||
# name or a second email | # name or a second email | ||||
if _ismailmaplineinvalid(names, emails): | if _ismailmaplineinvalid(names, emails): | ||||
continue | continue | ||||
mailmapkey = mailmapping( | mailmapkey = mailmapping( | ||||
email=emails[-1], name=names[-1] if len(names) == 2 else None, | email=emails[-1], | ||||
name=names[-1] if len(names) == 2 else None, | |||||
) | ) | ||||
mailmap[mailmapkey] = mailmapping( | mailmap[mailmapkey] = mailmapping( | ||||
email=emails[0], name=names[0] if names else None, | email=emails[0], | ||||
name=names[0] if names else None, | |||||
) | ) | ||||
return mailmap | return mailmap | ||||
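A minimal sketch of the mapping shape built above: the commit-side identity is the key, the canonical identity the value (addresses illustrative):

mm = parsemailmap(b'Proper Name <proper@email.com> <commit@email.com>\n')
key = mailmapping(email=b'commit@email.com')  # only one name, so key name is None
assert mm[key] == mailmapping(email=b'proper@email.com', name=b'Proper Name')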
def mapname(mailmap, author): | def mapname(mailmap, author): | ||||
"""Returns the author field according to the mailmap cache, or | """Returns the author field according to the mailmap cache, or | ||||
the original author field. | the original author field. | ||||
proper.email if proper.email else commit.email, | proper.email if proper.email else commit.email, | ||||
) | ) | ||||
_correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$') | _correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$') | ||||
def isauthorwellformed(author): | def isauthorwellformed(author): | ||||
'''Return True if the author field is well formed | """Return True if the author field is well formed | ||||
(ie "Contributor Name <contrib@email.dom>") | (ie "Contributor Name <contrib@email.dom>") | ||||
>>> isauthorwellformed(b'Good Author <good@author.com>') | >>> isauthorwellformed(b'Good Author <good@author.com>') | ||||
True | True | ||||
>>> isauthorwellformed(b'Author <good@author.com>') | >>> isauthorwellformed(b'Author <good@author.com>') | ||||
True | True | ||||
>>> isauthorwellformed(b'Bad Author') | >>> isauthorwellformed(b'Bad Author') | ||||
False | False | ||||
>>> isauthorwellformed(b'Bad Author <author@author.com') | >>> isauthorwellformed(b'Bad Author <author@author.com') | ||||
False | False | ||||
>>> isauthorwellformed(b'Bad Author author@author.com') | >>> isauthorwellformed(b'Bad Author author@author.com') | ||||
False | False | ||||
>>> isauthorwellformed(b'<author@author.com>') | >>> isauthorwellformed(b'<author@author.com>') | ||||
False | False | ||||
>>> isauthorwellformed(b'Bad Author <author>') | >>> isauthorwellformed(b'Bad Author <author>') | ||||
False | False | ||||
''' | """ | ||||
return _correctauthorformat.match(author) is not None | return _correctauthorformat.match(author) is not None | ||||
def ellipsis(text, maxlength=400): | def ellipsis(text, maxlength=400): | ||||
"""Trim string to at most maxlength (default: 400) columns in display.""" | """Trim string to at most maxlength (default: 400) columns in display.""" | ||||
return encoding.trim(text, maxlength, ellipsis=b'...') | return encoding.trim(text, maxlength, ellipsis=b'...') | ||||
return self.readlines(path, mode=mode) | return self.readlines(path, mode=mode) | ||||
except IOError as inst: | except IOError as inst: | ||||
if inst.errno != errno.ENOENT: | if inst.errno != errno.ENOENT: | ||||
raise | raise | ||||
return [] | return [] | ||||
@util.propertycache | @util.propertycache | ||||
def open(self): | def open(self): | ||||
'''Open ``path`` file, which is relative to vfs root. | """Open ``path`` file, which is relative to vfs root. | ||||
Newly created directories are marked as "not to be indexed by | Newly created directories are marked as "not to be indexed by | ||||
the content indexing service", if ``notindexed`` is specified | the content indexing service", if ``notindexed`` is specified | ||||
for "write" mode access. | for "write" mode access. | ||||
''' | """ | ||||
return self.__call__ | return self.__call__ | ||||
def read(self, path): | def read(self, path): | ||||
with self(path, b'rb') as fp: | with self(path, b'rb') as fp: | ||||
return fp.read() | return fp.read() | ||||
def readlines(self, path, mode=b'rb'): | def readlines(self, path, mode=b'rb'): | ||||
with self(path, mode=mode) as fp: | with self(path, mode=mode) as fp: | ||||
def isfile(self, path=None): | def isfile(self, path=None): | ||||
return os.path.isfile(self.join(path)) | return os.path.isfile(self.join(path)) | ||||
def islink(self, path=None): | def islink(self, path=None): | ||||
return os.path.islink(self.join(path)) | return os.path.islink(self.join(path)) | ||||
def isfileorlink(self, path=None): | def isfileorlink(self, path=None): | ||||
'''return whether path is a regular file or a symlink | """return whether path is a regular file or a symlink | ||||
Unlike isfile, this doesn't follow symlinks.''' | Unlike isfile, this doesn't follow symlinks.""" | ||||
try: | try: | ||||
st = self.lstat(path) | st = self.lstat(path) | ||||
except OSError: | except OSError: | ||||
return False | return False | ||||
mode = st.st_mode | mode = st.st_mode | ||||
return stat.S_ISREG(mode) or stat.S_ISLNK(mode) | return stat.S_ISREG(mode) or stat.S_ISLNK(mode) | ||||
def reljoin(self, *paths): | def reljoin(self, *paths): | ||||
_avoidambig(dstpath, oldstat) | _avoidambig(dstpath, oldstat) | ||||
return ret | return ret | ||||
return util.rename(srcpath, dstpath) | return util.rename(srcpath, dstpath) | ||||
def readlink(self, path): | def readlink(self, path): | ||||
return util.readlink(self.join(path)) | return util.readlink(self.join(path)) | ||||
def removedirs(self, path=None): | def removedirs(self, path=None): | ||||
"""Remove a leaf directory and all empty intermediate ones | """Remove a leaf directory and all empty intermediate ones""" | ||||
""" | |||||
return util.removedirs(self.join(path)) | return util.removedirs(self.join(path)) | ||||
def rmdir(self, path=None): | def rmdir(self, path=None): | ||||
"""Remove an empty directory.""" | """Remove an empty directory.""" | ||||
return os.rmdir(self.join(path)) | return os.rmdir(self.join(path)) | ||||
def rmtree(self, path=None, ignore_errors=False, forcibly=False): | def rmtree(self, path=None, ignore_errors=False, forcibly=False): | ||||
"""Remove a directory tree recursively | """Remove a directory tree recursively | ||||
yield bfc | yield bfc | ||||
finally: | finally: | ||||
vfs._backgroundfilecloser = ( | vfs._backgroundfilecloser = ( | ||||
None # pytype: disable=attribute-error | None # pytype: disable=attribute-error | ||||
) | ) | ||||
class vfs(abstractvfs): | class vfs(abstractvfs): | ||||
'''Operate files relative to a base directory | """Operate files relative to a base directory | ||||
This class is used to hide the details of COW semantics and | This class is used to hide the details of COW semantics and | ||||
remote file access from higher level code. | remote file access from higher level code. | ||||
'cacheaudited' should be enabled only if (a) vfs object is short-lived, or | 'cacheaudited' should be enabled only if (a) vfs object is short-lived, or | ||||
(b) the base directory is managed by hg and considered sort-of append-only. | (b) the base directory is managed by hg and considered sort-of append-only. | ||||
See pathutil.pathauditor() for details. | See pathutil.pathauditor() for details. | ||||
''' | """ | ||||
def __init__( | def __init__( | ||||
self, | self, | ||||
base, | base, | ||||
audit=True, | audit=True, | ||||
cacheaudited=False, | cacheaudited=False, | ||||
expandpath=False, | expandpath=False, | ||||
realpath=False, | realpath=False, | ||||
mode=b"r", | mode=b"r", | ||||
atomictemp=False, | atomictemp=False, | ||||
notindexed=False, | notindexed=False, | ||||
backgroundclose=False, | backgroundclose=False, | ||||
checkambig=False, | checkambig=False, | ||||
auditpath=True, | auditpath=True, | ||||
makeparentdirs=True, | makeparentdirs=True, | ||||
): | ): | ||||
'''Open ``path`` file, which is relative to vfs root. | """Open ``path`` file, which is relative to vfs root. | ||||
By default, parent directories are created as needed. Newly created | By default, parent directories are created as needed. Newly created | ||||
directories are marked as "not to be indexed by the content indexing | directories are marked as "not to be indexed by the content indexing | ||||
service", if ``notindexed`` is specified for "write" mode access. | service", if ``notindexed`` is specified for "write" mode access. | ||||
Set ``makeparentdirs=False`` to not create directories implicitly. | Set ``makeparentdirs=False`` to not create directories implicitly. | ||||
If ``backgroundclose`` is passed, the file may be closed asynchronously. | If ``backgroundclose`` is passed, the file may be closed asynchronously. | ||||
It can only be used if the ``self.backgroundclosing()`` context manager | It can only be used if the ``self.backgroundclosing()`` context manager | ||||
only for writing), and is useful only if target file is | only for writing), and is useful only if target file is | ||||
guarded by any lock (e.g. repo.lock or repo.wlock). | guarded by any lock (e.g. repo.lock or repo.wlock). | ||||
To avoid file stat ambiguity forcibly, checkambig=True involves | To avoid file stat ambiguity forcibly, checkambig=True involves | ||||
copying ``path`` file opened in "append" mode (e.g. for | copying ``path`` file opened in "append" mode (e.g. for | ||||
truncation), if it is owned by another. Therefore, use | truncation), if it is owned by another. Therefore, use | ||||
combination of append mode and checkambig=True only in limited | combination of append mode and checkambig=True only in limited | ||||
cases (see also issue5418 and issue5584 for detail). | cases (see also issue5418 and issue5584 for detail). | ||||
''' | """ | ||||
if auditpath: | if auditpath: | ||||
self._auditpath(path, mode) | self._auditpath(path, mode) | ||||
f = self.join(path) | f = self.join(path) | ||||
if b"b" not in mode: | if b"b" not in mode: | ||||
mode += b"b" # for that other OS | mode += b"b" # for that other OS | ||||
nlink = -1 | nlink = -1 |
if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)): | if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)): | ||||
_raiseoserror(name) | _raiseoserror(name) | ||||
return fi | return fi | ||||
finally: | finally: | ||||
_kernel32.CloseHandle(fh) | _kernel32.CloseHandle(fh) | ||||
def checkcertificatechain(cert, build=True): | def checkcertificatechain(cert, build=True): | ||||
'''Tests the given certificate to see if there is a complete chain to a | """Tests the given certificate to see if there is a complete chain to a | ||||
trusted root certificate. As a side effect, missing certificates are | trusted root certificate. As a side effect, missing certificates are | ||||
downloaded and installed unless ``build=False``. True is returned if a | downloaded and installed unless ``build=False``. True is returned if a | ||||
chain to a trusted root exists (even if built on the fly), otherwise | chain to a trusted root exists (even if built on the fly), otherwise | ||||
False. NB: A chain to a trusted root does NOT imply that the certificate | False. NB: A chain to a trusted root does NOT imply that the certificate | ||||
is valid. | is valid. | ||||
''' | """ | ||||
chainctxptr = ctypes.POINTER(CERT_CHAIN_CONTEXT) | chainctxptr = ctypes.POINTER(CERT_CHAIN_CONTEXT) | ||||
pchainctx = chainctxptr() | pchainctx = chainctxptr() | ||||
chainpara = CERT_CHAIN_PARA( | chainpara = CERT_CHAIN_PARA( | ||||
cbSize=ctypes.sizeof(CERT_CHAIN_PARA), RequestedUsage=CERT_USAGE_MATCH() | cbSize=ctypes.sizeof(CERT_CHAIN_PARA), RequestedUsage=CERT_USAGE_MATCH() | ||||
) | ) | ||||
def lasterrorwaspipeerror(err): | def lasterrorwaspipeerror(err): | ||||
if err.errno != errno.EINVAL: | if err.errno != errno.EINVAL: | ||||
return False | return False | ||||
err = _kernel32.GetLastError() | err = _kernel32.GetLastError() | ||||
return err == _ERROR_BROKEN_PIPE or err == _ERROR_NO_DATA | return err == _ERROR_BROKEN_PIPE or err == _ERROR_NO_DATA | ||||
def testpid(pid): | def testpid(pid): | ||||
'''return True if pid is still running or unable to | """return True if pid is still running or unable to | ||||
determine, False otherwise''' | determine, False otherwise""" | ||||
h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid) | h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid) | ||||
if h: | if h: | ||||
try: | try: | ||||
status = _DWORD() | status = _DWORD() | ||||
if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)): | if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)): | ||||
return status.value == _STILL_ACTIVE | return status.value == _STILL_ACTIVE | ||||
finally: | finally: | ||||
_kernel32.CloseHandle(h) | _kernel32.CloseHandle(h) | ||||
raise ctypes.WinError() | raise ctypes.WinError() | ||||
return buf.value | return buf.value | ||||
_signalhandler = [] | _signalhandler = [] | ||||
def setsignalhandler(): | def setsignalhandler(): | ||||
'''Register a termination handler for console events including | """Register a termination handler for console events including | ||||
CTRL+C. python signal handlers do not work well with socket | CTRL+C. python signal handlers do not work well with socket | ||||
operations. | operations. | ||||
''' | """ | ||||
def handler(event): | def handler(event): | ||||
_kernel32.ExitProcess(1) | _kernel32.ExitProcess(1) | ||||
if _signalhandler: | if _signalhandler: | ||||
return # already registered | return # already registered | ||||
h = _SIGNAL_HANDLER(handler) | h = _SIGNAL_HANDLER(handler) | ||||
_signalhandler.append(h) # needed to prevent garbage collection | _signalhandler.append(h) # needed to prevent garbage collection | ||||
if not _kernel32.GetConsoleScreenBufferInfo(screenbuf, ctypes.byref(csbi)): | if not _kernel32.GetConsoleScreenBufferInfo(screenbuf, ctypes.byref(csbi)): | ||||
return width, height | return width, height | ||||
width = csbi.srWindow.Right - csbi.srWindow.Left # don't '+ 1' | width = csbi.srWindow.Right - csbi.srWindow.Left # don't '+ 1' | ||||
height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1 | height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1 | ||||
return width, height | return width, height | ||||
def enablevtmode(): | def enablevtmode(): | ||||
'''Enable virtual terminal mode for the associated console. Return True if | """Enable virtual terminal mode for the associated console. Return True if | ||||
enabled, else False.''' | enabled, else False.""" | ||||
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 | ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 | ||||
handle = _kernel32.GetStdHandle( | handle = _kernel32.GetStdHandle( | ||||
_STD_OUTPUT_HANDLE | _STD_OUTPUT_HANDLE | ||||
) # don't close the handle | ) # don't close the handle | ||||
if handle == _INVALID_HANDLE_VALUE: | if handle == _INVALID_HANDLE_VALUE: | ||||
return False | return False |
def _isatty(fp): | def _isatty(fp): | ||||
try: | try: | ||||
return fp.isatty() | return fp.isatty() | ||||
except AttributeError: | except AttributeError: | ||||
return False | return False | ||||
class winstdout(object): | class winstdout(object): | ||||
'''Some files on Windows misbehave. | """Some files on Windows misbehave. | ||||
When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | ||||
When writing too many bytes to a console at the same time, a "Not enough space" | When writing too many bytes to a console at the same time, a "Not enough space" | ||||
error may happen. Python 3 already works around that. | error may happen. Python 3 already works around that. | ||||
''' | """ | ||||
def __init__(self, fp): | def __init__(self, fp): | ||||
self.fp = fp | self.fp = fp | ||||
self.throttle = not pycompat.ispy3 and _isatty(fp) | self.throttle = not pycompat.ispy3 and _isatty(fp) | ||||
def __getattr__(self, key): | def __getattr__(self, key): | ||||
return getattr(self.fp, key) | return getattr(self.fp, key) | ||||
# if you change this stub into a real check, please try to implement the | # if you change this stub into a real check, please try to implement the | ||||
# username and groupname functions above, too. | # username and groupname functions above, too. | ||||
def isowner(st): | def isowner(st): | ||||
return True | return True | ||||
def findexe(command): | def findexe(command): | ||||
'''Find executable for command searching like cmd.exe does. | """Find executable for command searching like cmd.exe does. | ||||
If command is a basename then PATH is searched for command. | If command is a basename then PATH is searched for command. | ||||
PATH isn't searched if command is an absolute or relative path. | PATH isn't searched if command is an absolute or relative path. | ||||
An extension from PATHEXT is found and added if not present. | An extension from PATHEXT is found and added if not present. | ||||
If command isn't found None is returned.''' | If command isn't found None is returned.""" | ||||
pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') | ||||
pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] | ||||
if os.path.splitext(command)[1].lower() in pathexts: | if os.path.splitext(command)[1].lower() in pathexts: | ||||
pathexts = [b''] | pathexts = [b''] | ||||
def findexisting(pathcommand): | def findexisting(pathcommand): | ||||
"""Will append extension (if needed) and return existing file""" | """Will append extension (if needed) and return existing file""" | ||||
for ext in pathexts: | for ext in pathexts: | ||||
return executable | return executable | ||||
return findexisting(os.path.expanduser(os.path.expandvars(command))) | return findexisting(os.path.expanduser(os.path.expandvars(command))) | ||||
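Hedged examples of the search rules above on a hypothetical Windows setup (paths illustrative only):

# findexe(b'python')    -> b'C:\\Python39\\python.exe' (PATH + PATHEXT search)
# findexe(b'tool.cmd')  -> extension already in PATHEXT, so tried as given
# findexe(b'.\\absent') -> None; relative and absolute paths skip PATH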
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | ||||
def statfiles(files): | def statfiles(files): | ||||
'''Stat each file in files. Yield each stat, or None if a file | """Stat each file in files. Yield each stat, or None if a file | ||||
does not exist or has a type we don't care about. | does not exist or has a type we don't care about. | ||||
Cluster and cache stat per directory to minimize number of OS stat calls.''' | Cluster and cache stat per directory to minimize number of OS stat calls.""" | ||||
dircache = {} # dirname -> filename -> status | None if file does not exist | dircache = {} # dirname -> filename -> status | None if file does not exist | ||||
getkind = stat.S_IFMT | getkind = stat.S_IFMT | ||||
for nf in files: | for nf in files: | ||||
nf = normcase(nf) | nf = normcase(nf) | ||||
dir, base = os.path.split(nf) | dir, base = os.path.split(nf) | ||||
if not dir: | if not dir: | ||||
dir = b'.' | dir = b'.' | ||||
cache = dircache.get(dir, None) | cache = dircache.get(dir, None) | ||||
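Usage sketch: results come back in input order, one entry per requested file, and each directory is listed at most once thanks to dircache (file names hypothetical):

for st in statfiles([b'a/x.txt', b'a/y.txt', b'b/z.txt']):
    if st is None:
        print('missing, or not a regular file/symlink')
    else:
        print(st.st_size)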
def __init__(self, path): | def __init__(self, path): | ||||
pass | pass | ||||
def cacheable(self): | def cacheable(self): | ||||
return False | return False | ||||
def lookupreg(key, valname=None, scope=None): | def lookupreg(key, valname=None, scope=None): | ||||
''' Look up a key/value name in the Windows registry. | """Look up a key/value name in the Windows registry. | ||||
valname: value name. If unspecified, the default value for the key | valname: value name. If unspecified, the default value for the key | ||||
is used. | is used. | ||||
scope: optionally specify scope for registry lookup, this can be | scope: optionally specify scope for registry lookup, this can be | ||||
a sequence of scopes to look up in order. Default (CURRENT_USER, | a sequence of scopes to look up in order. Default (CURRENT_USER, | ||||
LOCAL_MACHINE). | LOCAL_MACHINE). | ||||
''' | """ | ||||
if scope is None: | if scope is None: | ||||
scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) | ||||
elif not isinstance(scope, (list, tuple)): | elif not isinstance(scope, (list, tuple)): | ||||
scope = (scope,) | scope = (scope,) | ||||
for s in scope: | for s in scope: | ||||
try: | try: | ||||
with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: | ||||
name = valname and encoding.strfromlocal(valname) or valname | name = valname and encoding.strfromlocal(valname) or valname |
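A hedged call sketch (Windows only; key and value names are illustrative, not from the diff):

# searches HKEY_CURRENT_USER, then HKEY_LOCAL_MACHINE by default:
installdir = lookupreg(b'SOFTWARE\\Mercurial', valname=b'InstallDir')
# or restrict the lookup to a single scope:
installdir = lookupreg(
    b'SOFTWARE\\Mercurial', valname=b'InstallDir',
    scope=winreg.HKEY_LOCAL_MACHINE,
)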
if done: | if done: | ||||
break | break | ||||
def createalternatelocationresponseframe(stream, requestid, location): | def createalternatelocationresponseframe(stream, requestid, location): | ||||
data = { | data = { | ||||
b'status': b'redirect', | b'status': b'redirect', | ||||
b'location': {b'url': location.url, b'mediatype': location.mediatype,}, | b'location': { | ||||
b'url': location.url, | |||||
b'mediatype': location.mediatype, | |||||
}, | |||||
} | } | ||||
for a in ( | for a in ( | ||||
'size', | 'size', | ||||
'fullhashes', | 'fullhashes', | ||||
'fullhashseed', | 'fullhashseed', | ||||
'serverdercerts', | 'serverdercerts', | ||||
'servercadercerts', | 'servercadercerts', | ||||
payload=payload, | payload=payload, | ||||
encoded=encoded, | encoded=encoded, | ||||
) | ) | ||||
def createcommanderrorresponse(stream, requestid, message, args=None): | def createcommanderrorresponse(stream, requestid, message, args=None): | ||||
# TODO should this be using a list of {'msg': ..., 'args': {}} so atom | # TODO should this be using a list of {'msg': ..., 'args': {}} so atom | ||||
# formatting works consistently? | # formatting works consistently? | ||||
m = {b'status': b'error', b'error': {b'message': message,}} | m = { | ||||
b'status': b'error', | |||||
b'error': { | |||||
b'message': message, | |||||
}, | |||||
} | |||||
if args: | if args: | ||||
m[b'error'][b'args'] = args | m[b'error'][b'args'] = args | ||||
overall = b''.join(cborutil.streamencode(m)) | overall = b''.join(cborutil.streamencode(m)) | ||||
yield stream.makeframe( | yield stream.makeframe( | ||||
requestid=requestid, | requestid=requestid, | ||||
typeid=FRAME_TYPE_COMMAND_RESPONSE, | typeid=FRAME_TYPE_COMMAND_RESPONSE, | ||||
flags=FLAG_COMMAND_RESPONSE_EOS, | flags=FLAG_COMMAND_RESPONSE_EOS, | ||||
payload=overall, | payload=overall, | ||||
) | ) | ||||
def createerrorframe(stream, requestid, msg, errtype): | def createerrorframe(stream, requestid, msg, errtype): | ||||
# TODO properly handle frame size limits. | # TODO properly handle frame size limits. | ||||
assert len(msg) <= DEFAULT_MAX_FRAME_SIZE | assert len(msg) <= DEFAULT_MAX_FRAME_SIZE | ||||
payload = b''.join( | payload = b''.join( | ||||
cborutil.streamencode({b'type': errtype, b'message': [{b'msg': msg}],}) | cborutil.streamencode( | ||||
{ | |||||
b'type': errtype, | |||||
b'message': [{b'msg': msg}], | |||||
} | |||||
) | |||||
) | ) | ||||
yield stream.makeframe( | yield stream.makeframe( | ||||
requestid=requestid, | requestid=requestid, | ||||
typeid=FRAME_TYPE_ERROR_RESPONSE, | typeid=FRAME_TYPE_ERROR_RESPONSE, | ||||
flags=0, | flags=0, | ||||
payload=payload, | payload=payload, | ||||
) | ) | ||||
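For clarity, the error frame payload built above is a single CBOR map; decoded on the client side it would look like this (sketch, message text illustrative):

# {
#     b'type': b'server',
#     b'message': [{b'msg': b'exception when invoking command'}],
# }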
return b'noop', {} | return b'noop', {} | ||||
# If we buffered all our responses, emit those. | # If we buffered all our responses, emit those. | ||||
def makegen(): | def makegen(): | ||||
for gen in self._bufferedframegens: | for gen in self._bufferedframegens: | ||||
for frame in gen: | for frame in gen: | ||||
yield frame | yield frame | ||||
return b'sendframes', {b'framegen': makegen(),} | return b'sendframes', { | ||||
b'framegen': makegen(), | |||||
} | |||||
def _handlesendframes(self, framegen): | def _handlesendframes(self, framegen): | ||||
if self._deferoutput: | if self._deferoutput: | ||||
self._bufferedframegens.append(framegen) | self._bufferedframegens.append(framegen) | ||||
return b'noop', {} | return b'noop', {} | ||||
else: | else: | ||||
return b'sendframes', {b'framegen': framegen,} | return b'sendframes', { | ||||
b'framegen': framegen, | |||||
} | |||||
def onservererror(self, stream, requestid, msg): | def onservererror(self, stream, requestid, msg): | ||||
ensureserverstream(stream) | ensureserverstream(stream) | ||||
def sendframes(): | def sendframes(): | ||||
for frame in createerrorframe( | for frame in createerrorframe( | ||||
stream, requestid, msg, errtype=b'server' | stream, requestid, msg, errtype=b'server' | ||||
): | ): | ||||
for name in STREAM_ENCODERS_ORDER: | for name in STREAM_ENCODERS_ORDER: | ||||
if name in self._sendersettings[b'contentencodings']: | if name in self._sendersettings[b'contentencodings']: | ||||
s.setencoder(self._ui, name) | s.setencoder(self._ui, name) | ||||
break | break | ||||
return s | return s | ||||
def _makeerrorresult(self, msg): | def _makeerrorresult(self, msg): | ||||
return b'error', {b'message': msg,} | return b'error', { | ||||
b'message': msg, | |||||
} | |||||
def _makeruncommandresult(self, requestid): | def _makeruncommandresult(self, requestid): | ||||
entry = self._receivingcommands[requestid] | entry = self._receivingcommands[requestid] | ||||
if not entry[b'requestdone']: | if not entry[b'requestdone']: | ||||
self._state = b'errored' | self._state = b'errored' | ||||
raise error.ProgrammingError( | raise error.ProgrammingError( | ||||
b'should not be called without requestdone set' | b'should not be called without requestdone set' | ||||
b'command': request[b'name'], | b'command': request[b'name'], | ||||
b'args': request[b'args'], | b'args': request[b'args'], | ||||
b'redirect': request.get(b'redirect'), | b'redirect': request.get(b'redirect'), | ||||
b'data': entry[b'data'].getvalue() if entry[b'data'] else None, | b'data': entry[b'data'].getvalue() if entry[b'data'] else None, | ||||
}, | }, | ||||
) | ) | ||||
def _makewantframeresult(self): | def _makewantframeresult(self): | ||||
return b'wantframe', {b'state': self._state,} | return b'wantframe', { | ||||
b'state': self._state, | |||||
} | |||||
def _validatecommandrequestframe(self, frame): | def _validatecommandrequestframe(self, frame): | ||||
new = frame.flags & FLAG_COMMAND_REQUEST_NEW | new = frame.flags & FLAG_COMMAND_REQUEST_NEW | ||||
continuation = frame.flags & FLAG_COMMAND_REQUEST_CONTINUATION | continuation = frame.flags & FLAG_COMMAND_REQUEST_CONTINUATION | ||||
if new and continuation: | if new and continuation: | ||||
self._state = b'errored' | self._state = b'errored' | ||||
return self._makeerrorresult( | return self._makeerrorresult( | ||||
if not self._hasmultiplesend: | if not self._hasmultiplesend: | ||||
self._cansend = False | self._cansend = False | ||||
self._canissuecommands = False | self._canissuecommands = False | ||||
return ( | return ( | ||||
request, | request, | ||||
b'sendframes', | b'sendframes', | ||||
{b'framegen': self._makecommandframes(request),}, | { | ||||
b'framegen': self._makecommandframes(request), | |||||
}, | |||||
) | ) | ||||
def flushcommands(self): | def flushcommands(self): | ||||
"""Request that all queued commands be sent. | """Request that all queued commands be sent. | ||||
If any commands are buffered, this will instruct the caller to send | If any commands are buffered, this will instruct the caller to send | ||||
them over the wire. If no commands are buffered it instructs the client | them over the wire. If no commands are buffered it instructs the client | ||||
to no-op. | to no-op. | ||||
self._cansend = False | self._cansend = False | ||||
def makeframes(): | def makeframes(): | ||||
while self._pendingrequests: | while self._pendingrequests: | ||||
request = self._pendingrequests.popleft() | request = self._pendingrequests.popleft() | ||||
for frame in self._makecommandframes(request): | for frame in self._makecommandframes(request): | ||||
yield frame | yield frame | ||||
return b'sendframes', {b'framegen': makeframes(),} | return b'sendframes', { | ||||
b'framegen': makeframes(), | |||||
} | |||||
def _makecommandframes(self, request): | def _makecommandframes(self, request): | ||||
"""Emit frames to issue a command request. | """Emit frames to issue a command request. | ||||
As a side-effect, update request accounting to reflect its changed | As a side-effect, update request accounting to reflect its changed | ||||
state. | state. | ||||
""" | """ | ||||
self._activerequests[request.requestid] = request | self._activerequests[request.requestid] = request | ||||
request.state = b'sending' | request.state = b'sending' | ||||
if not self._protocolsettingssent and self._clientcontentencoders: | if not self._protocolsettingssent and self._clientcontentencoders: | ||||
self._protocolsettingssent = True | self._protocolsettingssent = True | ||||
payload = b''.join( | payload = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'contentencodings': self._clientcontentencoders,} | { | ||||
b'contentencodings': self._clientcontentencoders, | |||||
} | |||||
) | ) | ||||
) | ) | ||||
yield self._outgoingstream.makeframe( | yield self._outgoingstream.makeframe( | ||||
requestid=request.requestid, | requestid=request.requestid, | ||||
typeid=FRAME_TYPE_SENDER_PROTOCOL_SETTINGS, | typeid=FRAME_TYPE_SENDER_PROTOCOL_SETTINGS, | ||||
flags=FLAG_SENDER_PROTOCOL_SETTINGS_EOS, | flags=FLAG_SENDER_PROTOCOL_SETTINGS_EOS, | ||||
payload=payload, | payload=payload, |
NARROWCAP = b'exp-narrow-1' | NARROWCAP = b'exp-narrow-1' | ||||
ELLIPSESCAP1 = b'exp-ellipses-1' | ELLIPSESCAP1 = b'exp-ellipses-1' | ||||
ELLIPSESCAP = b'exp-ellipses-2' | ELLIPSESCAP = b'exp-ellipses-2' | ||||
SUPPORTED_ELLIPSESCAP = (ELLIPSESCAP1, ELLIPSESCAP) | SUPPORTED_ELLIPSESCAP = (ELLIPSESCAP1, ELLIPSESCAP) | ||||
# All available wire protocol transports. | # All available wire protocol transports. | ||||
TRANSPORTS = { | TRANSPORTS = { | ||||
SSHV1: {b'transport': b'ssh', b'version': 1,}, | SSHV1: { | ||||
b'transport': b'ssh', | |||||
b'version': 1, | |||||
}, | |||||
SSHV2: { | SSHV2: { | ||||
b'transport': b'ssh', | b'transport': b'ssh', | ||||
# TODO mark as version 2 once all commands are implemented. | # TODO mark as version 2 once all commands are implemented. | ||||
b'version': 1, | b'version': 1, | ||||
}, | }, | ||||
b'http-v1': {b'transport': b'http', b'version': 1,}, | b'http-v1': { | ||||
HTTP_WIREPROTO_V2: {b'transport': b'http', b'version': 2,}, | b'transport': b'http', | ||||
b'version': 1, | |||||
}, | |||||
HTTP_WIREPROTO_V2: { | |||||
b'transport': b'http', | |||||
b'version': 2, | |||||
}, | |||||
} | } | ||||
class bytesresponse(object): | class bytesresponse(object): | ||||
"""A wire protocol response consisting of raw bytes.""" | """A wire protocol response consisting of raw bytes.""" | ||||
def __init__(self, data): | def __init__(self, data): | ||||
self.data = data | self.data = data |
util as interfaceutil, | util as interfaceutil, | ||||
) | ) | ||||
from .utils import hashutil | from .utils import hashutil | ||||
urlreq = util.urlreq | urlreq = util.urlreq | ||||
def batchable(f): | def batchable(f): | ||||
'''annotation for batchable methods | """annotation for batchable methods | ||||
Such methods must implement a coroutine as follows: | Such methods must implement a coroutine as follows: | ||||
@batchable | @batchable | ||||
def sample(self, one, two=None): | def sample(self, one, two=None): | ||||
# Build list of encoded arguments suitable for your wire protocol: | # Build list of encoded arguments suitable for your wire protocol: | ||||
encargs = [('one', encode(one),), ('two', encode(two),)] | encargs = [('one', encode(one),), ('two', encode(two),)] | ||||
# Create future for injection of encoded result: | # Create future for injection of encoded result: | ||||
encresref = future() | encresref = future() | ||||
# Return encoded arguments and future: | # Return encoded arguments and future: | ||||
yield encargs, encresref | yield encargs, encresref | ||||
# Assuming the future to be filled with the result from the batched | # Assuming the future to be filled with the result from the batched | ||||
# request now. Decode it: | # request now. Decode it: | ||||
yield decode(encresref.value) | yield decode(encresref.value) | ||||
The decorator returns a function which wraps this coroutine as a plain | The decorator returns a function which wraps this coroutine as a plain | ||||
method, but adds the original method as an attribute called "batchable", | method, but adds the original method as an attribute called "batchable", | ||||
which is used by remotebatch to split the call into separate encoding and | which is used by remotebatch to split the call into separate encoding and | ||||
decoding phases. | decoding phases. | ||||
''' | """ | ||||
def plain(*args, **opts): | def plain(*args, **opts): | ||||
batchable = f(*args, **opts) | batchable = f(*args, **opts) | ||||
encargsorres, encresref = next(batchable) | encargsorres, encresref = next(batchable) | ||||
if not encresref: | if not encresref: | ||||
return encargsorres # a local result in this case | return encargsorres # a local result in this case | ||||
self = args[0] | self = args[0] | ||||
cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr | cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr | ||||
opts[key] = value | opts[key] = value | ||||
f = self._callcompressable(b"getbundle", **pycompat.strkwargs(opts)) | f = self._callcompressable(b"getbundle", **pycompat.strkwargs(opts)) | ||||
if any((cap.startswith(b'HG2') for cap in bundlecaps)): | if any((cap.startswith(b'HG2') for cap in bundlecaps)): | ||||
return bundle2.getunbundler(self.ui, f) | return bundle2.getunbundler(self.ui, f) | ||||
else: | else: | ||||
return changegroupmod.cg1unpacker(f, b'UN') | return changegroupmod.cg1unpacker(f, b'UN') | ||||
def unbundle(self, bundle, heads, url): | def unbundle(self, bundle, heads, url): | ||||
'''Send cg (a readable file-like object representing the | """Send cg (a readable file-like object representing the | ||||
changegroup to push, typically a chunkbuffer object) to the | changegroup to push, typically a chunkbuffer object) to the | ||||
remote server as a bundle. | remote server as a bundle. | ||||
When pushing a bundle10 stream, return an integer indicating the | When pushing a bundle10 stream, return an integer indicating the | ||||
result of the push (see changegroup.apply()). | result of the push (see changegroup.apply()). | ||||
When pushing a bundle20 stream, return a bundle20 stream. | When pushing a bundle20 stream, return a bundle20 stream. | ||||
`url` is the url the client thinks it's pushing to, which is | `url` is the url the client thinks it's pushing to, which is | ||||
visible to hooks. | visible to hooks. | ||||
''' | """ | ||||
if heads != [b'force'] and self.capable(b'unbundlehash'): | if heads != [b'force'] and self.capable(b'unbundlehash'): | ||||
heads = wireprototypes.encodelist( | heads = wireprototypes.encodelist( | ||||
[b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()] | [b'hashed', hashutil.sha1(b''.join(sorted(heads))).digest()] | ||||
) | ) | ||||
else: | else: | ||||
heads = wireprototypes.encodelist(heads) | heads = wireprototypes.encodelist(heads) | ||||
def _calltwowaystream(self, cmd, fp, **args): | def _calltwowaystream(self, cmd, fp, **args): | ||||
"""execute <cmd> on server | """execute <cmd> on server | ||||
The command will send a stream to the server and get a stream in reply. | The command will send a stream to the server and get a stream in reply. | ||||
""" | """ | ||||
raise NotImplementedError() | raise NotImplementedError() | ||||
def _abort(self, exception): | def _abort(self, exception): | ||||
"""clearly abort the wire protocol connection and raise the exception | """clearly abort the wire protocol connection and raise the exception""" | ||||
""" | |||||
raise NotImplementedError() | raise NotImplementedError() |
) | ) | ||||
output = output.getvalue() if output else b'' | output = output.getvalue() if output else b'' | ||||
return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output)) | return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output)) | ||||
@wireprotocommand(b'stream_out', permission=b'pull') | @wireprotocommand(b'stream_out', permission=b'pull') | ||||
def stream(repo, proto): | def stream(repo, proto): | ||||
'''If the server supports streaming clone, it advertises the "stream" | """If the server supports streaming clone, it advertises the "stream" | ||||
capability with a value representing the version and flags of the repo | capability with a value representing the version and flags of the repo | ||||
it is serving. Client checks to see if it understands the format. | it is serving. Client checks to see if it understands the format. | ||||
''' | """ | ||||
return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo)) | return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo)) | ||||
@wireprotocommand(b'unbundle', b'heads', permission=b'push') | @wireprotocommand(b'unbundle', b'heads', permission=b'push') | ||||
def unbundle(repo, proto, heads): | def unbundle(repo, proto, heads): | ||||
their_heads = wireprototypes.decodelist(heads) | their_heads = wireprototypes.decodelist(heads) | ||||
with proto.mayberedirectstdio() as output: | with proto.mayberedirectstdio() as output: |
@wireprotocommand( | @wireprotocommand( | ||||
b'changesetdata', | b'changesetdata', | ||||
args={ | args={ | ||||
b'revisions': { | b'revisions': { | ||||
b'type': b'list', | b'type': b'list', | ||||
b'example': [ | b'example': [ | ||||
{b'type': b'changesetexplicit', b'nodes': [b'abcdef...'],} | { | ||||
b'type': b'changesetexplicit', | |||||
b'nodes': [b'abcdef...'], | |||||
} | |||||
], | ], | ||||
}, | }, | ||||
b'fields': { | b'fields': { | ||||
b'type': b'set', | b'type': b'set', | ||||
b'default': set, | b'default': set, | ||||
b'example': {b'parents', b'revision'}, | b'example': {b'parents', b'revision'}, | ||||
b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'}, | b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'}, | ||||
}, | }, | ||||
@wireprotocommand( | @wireprotocommand( | ||||
b'filedata', | b'filedata', | ||||
args={ | args={ | ||||
b'haveparents': { | b'haveparents': { | ||||
b'type': b'bool', | b'type': b'bool', | ||||
b'default': lambda: False, | b'default': lambda: False, | ||||
b'example': True, | b'example': True, | ||||
}, | }, | ||||
b'nodes': {b'type': b'list', b'example': [b'0123456...'],}, | b'nodes': { | ||||
b'type': b'list', | |||||
b'example': [b'0123456...'], | |||||
}, | |||||
b'fields': { | b'fields': { | ||||
b'type': b'set', | b'type': b'set', | ||||
b'default': set, | b'default': set, | ||||
b'example': {b'parents', b'revision'}, | b'example': {b'parents', b'revision'}, | ||||
b'validvalues': {b'parents', b'revision', b'linknode'}, | b'validvalues': {b'parents', b'revision', b'linknode'}, | ||||
}, | }, | ||||
b'path': {b'type': b'bytes', b'example': b'foo.txt',}, | b'path': { | ||||
b'type': b'bytes', | |||||
b'example': b'foo.txt', | |||||
}, | |||||
}, | }, | ||||
permission=b'pull', | permission=b'pull', | ||||
# TODO censoring a file revision won't invalidate the cache. | # TODO censoring a file revision won't invalidate the cache. | ||||
# Figure out a way to take censoring into account when deriving | # Figure out a way to take censoring into account when deriving | ||||
# the cache key. | # the cache key. | ||||
cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True), | cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True), | ||||
) | ) | ||||
def filedata(repo, proto, haveparents, nodes, fields, path): | def filedata(repo, proto, haveparents, nodes, fields, path): | ||||
b'pathfilter': { | b'pathfilter': { | ||||
b'type': b'dict', | b'type': b'dict', | ||||
b'default': lambda: None, | b'default': lambda: None, | ||||
b'example': {b'include': [b'path:tests']}, | b'example': {b'include': [b'path:tests']}, | ||||
}, | }, | ||||
b'revisions': { | b'revisions': { | ||||
b'type': b'list', | b'type': b'list', | ||||
b'example': [ | b'example': [ | ||||
{b'type': b'changesetexplicit', b'nodes': [b'abcdef...'],} | { | ||||
b'type': b'changesetexplicit', | |||||
b'nodes': [b'abcdef...'], | |||||
} | |||||
], | ], | ||||
}, | }, | ||||
}, | }, | ||||
permission=b'pull', | permission=b'pull', | ||||
# TODO censoring a file revision won't invalidate the cache. | # TODO censoring a file revision won't invalidate the cache. | ||||
# Figure out a way to take censoring into account when deriving | # Figure out a way to take censoring into account when deriving | ||||
# the cache key. | # the cache key. | ||||
cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True), | cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True), | ||||
) | ) | ||||
def knownv2(repo, proto, nodes): | def knownv2(repo, proto, nodes): | ||||
result = b''.join(b'1' if n else b'0' for n in repo.known(nodes)) | result = b''.join(b'1' if n else b'0' for n in repo.known(nodes)) | ||||
yield result | yield result | ||||
@wireprotocommand( | @wireprotocommand( | ||||
b'listkeys', | b'listkeys', | ||||
args={b'namespace': {b'type': b'bytes', b'example': b'ns',},}, | args={ | ||||
b'namespace': { | |||||
b'type': b'bytes', | |||||
b'example': b'ns', | |||||
}, | |||||
}, | |||||
permission=b'pull', | permission=b'pull', | ||||
) | ) | ||||
def listkeysv2(repo, proto, namespace): | def listkeysv2(repo, proto, namespace): | ||||
keys = repo.listkeys(encoding.tolocal(namespace)) | keys = repo.listkeys(encoding.tolocal(namespace)) | ||||
keys = { | keys = { | ||||
encoding.fromlocal(k): encoding.fromlocal(v) | encoding.fromlocal(k): encoding.fromlocal(v) | ||||
for k, v in pycompat.iteritems(keys) | for k, v in pycompat.iteritems(keys) | ||||
} | } | ||||
yield keys | yield keys | ||||
@wireprotocommand( | @wireprotocommand( | ||||
b'lookup', | b'lookup', | ||||
args={b'key': {b'type': b'bytes', b'example': b'foo',},}, | args={ | ||||
b'key': { | |||||
b'type': b'bytes', | |||||
b'example': b'foo', | |||||
}, | |||||
}, | |||||
permission=b'pull', | permission=b'pull', | ||||
) | ) | ||||
def lookupv2(repo, proto, key): | def lookupv2(repo, proto, key): | ||||
key = encoding.tolocal(key) | key = encoding.tolocal(key) | ||||
# TODO handle exception. | # TODO handle exception. | ||||
node = repo.lookup(key) | node = repo.lookup(key) | ||||
yield node | yield node | ||||
def manifestdatacapabilities(repo, proto): | def manifestdatacapabilities(repo, proto): | ||||
batchsize = repo.ui.configint( | batchsize = repo.ui.configint( | ||||
b'experimental', b'server.manifestdata.recommended-batch-size' | b'experimental', b'server.manifestdata.recommended-batch-size' | ||||
) | ) | ||||
return { | return { | ||||
b'recommendedbatchsize': batchsize, | b'recommendedbatchsize': batchsize, | ||||
} | } | ||||
@wireprotocommand( | @wireprotocommand( | ||||
b'manifestdata', | b'manifestdata', | ||||
args={ | args={ | ||||
b'nodes': {b'type': b'list', b'example': [b'0123456...'],}, | b'nodes': { | ||||
b'type': b'list', | |||||
b'example': [b'0123456...'], | |||||
}, | |||||
b'haveparents': { | b'haveparents': { | ||||
b'type': b'bool', | b'type': b'bool', | ||||
b'default': lambda: False, | b'default': lambda: False, | ||||
b'example': True, | b'example': True, | ||||
}, | }, | ||||
b'fields': { | b'fields': { | ||||
b'type': b'set', | b'type': b'set', | ||||
b'default': set, | b'default': set, | ||||
b'example': {b'parents', b'revision'}, | b'example': {b'parents', b'revision'}, | ||||
b'validvalues': {b'parents', b'revision'}, | b'validvalues': {b'parents', b'revision'}, | ||||
}, | }, | ||||
b'tree': {b'type': b'bytes', b'example': b'',}, | b'tree': { | ||||
b'type': b'bytes', | |||||
b'example': b'', | |||||
}, | |||||
}, | }, | ||||
permission=b'pull', | permission=b'pull', | ||||
cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True), | cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True), | ||||
extracapabilitiesfn=manifestdatacapabilities, | extracapabilitiesfn=manifestdatacapabilities, | ||||
) | ) | ||||
def manifestdata(repo, proto, haveparents, nodes, fields, tree): | def manifestdata(repo, proto, haveparents, nodes, fields, tree): | ||||
store = repo.manifestlog.getstorage(tree) | store = repo.manifestlog.getstorage(tree) | ||||
for extra in followingdata: | for extra in followingdata: | ||||
yield extra | yield extra | ||||
@wireprotocommand( | @wireprotocommand( | ||||
b'pushkey', | b'pushkey', | ||||
args={ | args={ | ||||
b'namespace': {b'type': b'bytes', b'example': b'ns',}, | b'namespace': { | ||||
b'key': {b'type': b'bytes', b'example': b'key',}, | b'type': b'bytes', | ||||
b'old': {b'type': b'bytes', b'example': b'old',}, | b'example': b'ns', | ||||
b'new': {b'type': b'bytes', b'example': b'new',}, | }, | ||||
b'key': { | |||||
b'type': b'bytes', | |||||
b'example': b'key', | |||||
}, | |||||
b'old': { | |||||
b'type': b'bytes', | |||||
b'example': b'old', | |||||
}, | |||||
b'new': { | |||||
b'type': b'bytes', | |||||
b'example': b'new', | |||||
}, | |||||
}, | }, | ||||
permission=b'push', | permission=b'push', | ||||
) | ) | ||||
def pushkeyv2(repo, proto, namespace, key, old, new): | def pushkeyv2(repo, proto, namespace, key, old, new): | ||||
# TODO handle ui output redirection | # TODO handle ui output redirection | ||||
yield repo.pushkey( | yield repo.pushkey( | ||||
encoding.tolocal(namespace), | encoding.tolocal(namespace), | ||||
encoding.tolocal(key), | encoding.tolocal(key), |
# this overhead can slow down execution. | # this overhead can slow down execution. | ||||
_DISALLOW_THREAD_UNSAFE = pycompat.iswindows | _DISALLOW_THREAD_UNSAFE = pycompat.iswindows | ||||
else: | else: | ||||
_STARTUP_COST = 1e30 | _STARTUP_COST = 1e30 | ||||
_DISALLOW_THREAD_UNSAFE = False | _DISALLOW_THREAD_UNSAFE = False | ||||
def worthwhile(ui, costperop, nops, threadsafe=True): | def worthwhile(ui, costperop, nops, threadsafe=True): | ||||
'''try to determine whether the benefit of multiple processes can | """try to determine whether the benefit of multiple processes can | ||||
outweigh the cost of starting them''' | outweigh the cost of starting them""" | ||||
if not threadsafe and _DISALLOW_THREAD_UNSAFE: | if not threadsafe and _DISALLOW_THREAD_UNSAFE: | ||||
return False | return False | ||||
linear = costperop * nops | linear = costperop * nops | ||||
workers = _numworkers(ui) | workers = _numworkers(ui) | ||||
benefit = linear - (_STARTUP_COST * workers + linear / workers) | benefit = linear - (_STARTUP_COST * workers + linear / workers) | ||||
return benefit >= 0.15 | return benefit >= 0.15 | ||||
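A worked instance of the cost model, assuming a platform where workers are supported and a small _STARTUP_COST such as 0.01s (the non-Windows constant is collapsed out of this hunk):

# costperop = 0.001s over nops = 10000 operations, 4 workers:
#   linear  = 0.001 * 10000                 = 10.0s single-threaded
#   benefit = 10.0 - (0.01 * 4 + 10.0 / 4)  = 7.46s saved
#   7.46 >= 0.15, so the parallel path is chosen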
def worker( | def worker( | ||||
ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True | ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True | ||||
): | ): | ||||
'''run a function, possibly in parallel in multiple worker | """run a function, possibly in parallel in multiple worker | ||||
processes. | processes. | ||||
returns a progress iterator | returns a progress iterator | ||||
costperarg - cost of a single task | costperarg - cost of a single task | ||||
func - function to run. It is expected to return a progress iterator. | func - function to run. It is expected to return a progress iterator. | ||||
staticargs - arguments to pass to every invocation of the function | staticargs - arguments to pass to every invocation of the function | ||||
args - arguments to split into chunks, to pass to individual | args - arguments to split into chunks, to pass to individual | ||||
workers | workers | ||||
hasretval - when True, func and the current function return a progress | hasretval - when True, func and the current function return a progress | ||||
iterator then a dict (encoded as an iterator that yields many (False, ..) | iterator then a dict (encoded as an iterator that yields many (False, ..) | ||||
then a (True, dict)). The dicts are joined in some arbitrary order, so | then a (True, dict)). The dicts are joined in some arbitrary order, so | ||||
overlapping keys are a bad idea. | overlapping keys are a bad idea. | ||||
threadsafe - whether work items are thread safe and can be executed using | threadsafe - whether work items are thread safe and can be executed using | ||||
a thread-based worker. Should be disabled for CPU heavy tasks that don't | a thread-based worker. Should be disabled for CPU heavy tasks that don't | ||||
release the GIL. | release the GIL. | ||||
''' | """ | ||||
enabled = ui.configbool(b'worker', b'enabled') | enabled = ui.configbool(b'worker', b'enabled') | ||||
if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe): | if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe): | ||||
return _platformworker(ui, func, staticargs, args, hasretval) | return _platformworker(ui, func, staticargs, args, hasretval) | ||||
return func(*staticargs + (args,)) | return func(*staticargs + (args,)) | ||||
def _posixworker(ui, func, staticargs, args, hasretval): | def _posixworker(ui, func, staticargs, args, hasretval): | ||||
workers = _numworkers(ui) | workers = _numworkers(ui) | ||||
if status < 0: | if status < 0: | ||||
os.kill(os.getpid(), -status) | os.kill(os.getpid(), -status) | ||||
raise error.WorkerError(status) | raise error.WorkerError(status) | ||||
if hasretval: | if hasretval: | ||||
yield True, retval | yield True, retval | ||||
def _posixexitstatus(code): | def _posixexitstatus(code): | ||||
'''convert a posix exit status into the same form returned by | """convert a posix exit status into the same form returned by | ||||
os.spawnv | os.spawnv | ||||
returns None if the process was stopped instead of exiting''' | returns None if the process was stopped instead of exiting""" | ||||
if os.WIFEXITED(code): | if os.WIFEXITED(code): | ||||
return os.WEXITSTATUS(code) | return os.WEXITSTATUS(code) | ||||
elif os.WIFSIGNALED(code): | elif os.WIFSIGNALED(code): | ||||
return -(os.WTERMSIG(code)) | return -(os.WTERMSIG(code)) | ||||
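Concretely, matching the os.spawnv convention (sketch):

# normal exit with code 1:            os.WIFEXITED   -> returns 1
# child killed by SIGKILL (signal 9): os.WIFSIGNALED -> returns -9
# child stopped rather than exited:   neither branch -> returns None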
def _windowsworker(ui, func, staticargs, args, hasretval):
    class Worker(threading.Thread):

if pycompat.iswindows:
    _platformworker = _windowsworker
else:
    _platformworker = _posixworker
    _exitstatus = _posixexitstatus


def partition(lst, nslices):
-    '''partition a list into N slices of roughly equal size
+    """partition a list into N slices of roughly equal size

    The current strategy takes every Nth element from the input. If
    we ever write workers that need to preserve grouping in input
    we should consider allowing callers to specify a partition strategy.

    mpm is not a fan of this partitioning strategy when files are involved.
    In his words:

        the revlog ordering on disk by sorting the revlogs by hash and suddenly
        performance of my kernel checkout benchmark dropped by ~10x because the
        "working set" of sectors visited no longer fit in the drive's cache and
        the workload switched from streaming to random I/O.

        What we should really be doing is have workers read filenames from a
        ordered queue. This preserves locality and also keeps any worker from
        getting more than one file out of balance.
-    '''
+    """
    for i in range(nslices):
        yield lst[i::nslices]
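The strided slicing above is compact but, as the docstring warns, destroys locality; a quick demonstration:

def partition(lst, nslices):
    for i in range(nslices):
        yield lst[i::nslices]

print(list(partition(list(range(10)), 3)))
# [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]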
            pythonlib = dllbasename[:-4]

            # Copy the pythonXY.dll next to the binary so that it runs
            # without tampering with PATH.
            fsdecode = lambda x: x
            if sys.version_info[0] >= 3:
                fsdecode = os.fsdecode
            dest = os.path.join(
-                os.path.dirname(self.hgtarget), fsdecode(dllbasename),
+                os.path.dirname(self.hgtarget),
+                fsdecode(dllbasename),
            )
            if not os.path.exists(dest):
                shutil.copy(buf.value, dest)

        if not pythonlib:
            log.warn(
                'could not determine Python DLL filename; assuming pythonXY'

        # Screen out egg related commands to prevent egg generation. But allow
        # mercurial.egg-info generation, since that is part of modern
        # packaging.
        excl = {'bdist_egg'}
        return filter(lambda x: x not in excl, install.get_sub_commands(self))
class hginstalllib(install_lib):
-    '''
+    """
    This is a specialization of install_lib that replaces the copy_file used
    there so that it supports setting the mode of files after copying them,
    instead of just preserving the mode that the files originally had. If your
    system has a umask of something like 027, preserving the permissions when
    copying will lead to a broken install.

    Note that just passing keep_permissions=False to copy_file would be
    insufficient, as it might still be applying a umask.
-    '''
+    """

    def run(self):
        realcopyfile = file_util.copy_file

        def copyfileandsetmode(*args, **kwargs):
            src, dst = args[0], args[1]
            dst, copied = realcopyfile(*args, **kwargs)
            if copied:

        file_util.copy_file = copyfileandsetmode
        try:
            install_lib.run(self)
        finally:
            file_util.copy_file = realcopyfile


class hginstallscripts(install_scripts):
-    '''
+    """
    This is a specialization of install_scripts that replaces the @LIBDIR@ with
    the configured directory for modules. If possible, the path is made relative
    to the directory for scripts.
-    '''
+    """

    def initialize_options(self):
        install_scripts.initialize_options(self)
        self.install_lib = None

    def finalize_options(self):
        install_scripts.finalize_options(self)
]


class RustCompilationError(CCompilerError):
    """Exception class for Rust compilation errors."""


class RustExtension(Extension):
-    """Base classes for concrete Rust Extension classes.
-    """
+    """Base classes for concrete Rust Extension classes."""

    rusttargetdir = os.path.join('rust', 'target', 'release')

    def __init__(
        self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
    ):
        Extension.__init__(self, mpath, sources, **kw)
        srcdir = self.rustsrcdir = os.path.join('rust', subcrate)

            'mercurial/cext/manifest.c',
            'mercurial/cext/parsers.c',
            'mercurial/cext/pathencode.c',
            'mercurial/cext/revlog.c',
        ],
        include_dirs=common_include_dirs,
        extra_compile_args=common_cflags,
        depends=common_depends
-        + ['mercurial/cext/charencode.h', 'mercurial/cext/revlog.h',],
+        + [
+            'mercurial/cext/charencode.h',
+            'mercurial/cext/revlog.h',
+        ],
    ),
    Extension(
        'mercurial.cext.osutil',
        ['mercurial/cext/osutil.c'],
        include_dirs=common_include_dirs,
        extra_compile_args=common_cflags + osutil_cflags,
        extra_link_args=osutil_ldflags,
        depends=common_depends,
            msvccompilerclass.initialize(self)
            # "warning LNK4197: export 'func' specified multiple times"
            self.ldflags_shared.append('/ignore:4197')
            self.ldflags_shared_debug.append('/ignore:4197')

    msvccompiler.MSVCCompiler = HackedMSVCCompiler

packagedata = {
-    'mercurial': ['locale/*/LC_MESSAGES/hg.mo', 'dummycert.pem',],
-    'mercurial.defaultrc': ['*.rc',],
-    'mercurial.helptext': ['*.txt',],
-    'mercurial.helptext.internals': ['*.txt',],
+    'mercurial': [
+        'locale/*/LC_MESSAGES/hg.mo',
+        'dummycert.pem',
+    ],
+    'mercurial.defaultrc': [
+        '*.rc',
+    ],
+    'mercurial.helptext': [
+        '*.txt',
+    ],
+    'mercurial.helptext.internals': [
+        '*.txt',
+    ],
}
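Most hunks in this diff follow from one rule change: under black's "magic trailing comma" (introduced around 20.8b0, if memory serves), a collection whose last element carries a trailing comma is kept at one element per line, while a collection without one may collapse when it fits. A tiny before/after sketch:

# collapses: fits on one line, no trailing comma inside the brackets
ys = ['*.rc']

# stays exploded: the source's trailing comma after the last element wins
xs = [
    '*.rc',
]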
def ordinarypath(p):
    return p and p[0] != '.' and p[-1] != '~'

for root in ('templates',):
)

from mercurial.hgweb import server

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'badserver', b'closeafteraccept', default=False,
+    b'badserver',
+    b'closeafteraccept',
+    default=False,
)
configitem(
-    b'badserver', b'closeafterrecvbytes', default=b'0',
+    b'badserver',
+    b'closeafterrecvbytes',
+    default=b'0',
)
configitem(
-    b'badserver', b'closeaftersendbytes', default=b'0',
+    b'badserver',
+    b'closeaftersendbytes',
+    default=b'0',
)
configitem(
-    b'badserver', b'closebeforeaccept', default=False,
+    b'badserver',
+    b'closebeforeaccept',
+    default=False,
)

# We can't adjust __class__ on a socket instance. So we define a proxy type.
class socketproxy(object):
    __slots__ = (
        '_orig',
        '_logfp',
        '_closeafterrecvbytes',
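A minimal sketch of the proxy-type pattern the comment above refers to (hypothetical class, not the real socketproxy):

class proxy(object):
    __slots__ = ('_orig',)

    def __init__(self, orig):
        object.__setattr__(self, '_orig', orig)

    def __getattr__(self, name):
        # delegate anything we don't override to the wrapped instance
        return getattr(object.__getattribute__(self, '_orig'), name)

wrapped = proxy([1, 2, 3])
print(wrapped.count(2))  # a list method, reached through the proxy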
    rustext.__name__  # force actual import (see hgdemandimport)
except ImportError:
    rustext = None

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'fakedirstatewritetime', b'fakenow', default=None,
+    b'fakedirstatewritetime',
+    b'fakenow',
+    default=None,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('parsers')

def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
    # execute what original parsers.pack_dirstate should do actually
# extension to emulate invoking 'patch.internalpatch()' at the time
# specified by '[fakepatchtime] fakenow'

from __future__ import absolute_import

from mercurial import (
    extensions,
    patch as patchmod,
    registrar,
)
from mercurial.utils import dateutil

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
-    b'fakepatchtime', b'fakenow', default=None,
+    b'fakepatchtime',
+    b'fakenow',
+    default=None,
)

def internalpatch(
    orig,
    ui,
    repo,
    patchobj,
    revlog.REVIDX_FLAGS_ORDER.extend(flags)

    # Teach exchange to use changegroup 3
    for k in bundlecaches._bundlespeccontentopts.keys():
        bundlecaches._bundlespeccontentopts[k][b"cg.version"] = b"03"

    # Register flag processors for each extension
    flagutil.addflagprocessor(
-        REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,)
+        REVIDX_NOOP,
+        (
+            noopdonothingread,
+            noopdonothing,
+            validatehash,
+        ),
    )
    flagutil.addflagprocessor(
-        REVIDX_BASE64, (b64decode, b64encode, bypass,),
+        REVIDX_BASE64,
+        (
+            b64decode,
+            b64encode,
+            bypass,
+        ),
    )
    flagutil.addflagprocessor(
        REVIDX_GZIP, (gzipdecompress, gzipcompress, bypass)
    )
@check('black', 'the black formatter for python')
def has_black():
    blackcmd = 'black --version'
    version_regex = b'black, version ([0-9a-b.]+)'
    version = matchoutput(blackcmd, version_regex)
    sv = distutils.version.StrictVersion
-    return version and sv(_bytes2sys(version.group(1))) >= sv('19.10b0')
+    return version and sv(_bytes2sys(version.group(1))) >= sv('20.8b1')
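This is the one non-mechanical change in the diff: the minimum black version moves from 19.10b0 to 20.8b1, the release line that ships the new collection-splitting behavior. StrictVersion copes with the trailing pre-release tag, so the comparison does what you'd hope:

from distutils.version import StrictVersion as sv

print(sv('20.8b1') > sv('19.10b0'))  # True: the bumped floor is newer
print(sv('20.8b1') < sv('20.8'))     # True: the b1 pre-release sorts before final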
@check('pytype', 'the pytype type checker')
def has_pytype():
    pytypecmd = 'pytype --version'
    version = matchoutput(pytypecmd, b'[0-9a-b.]+')
    sv = distutils.version.StrictVersion
    return version and sv(_bytes2sys(version.group(0))) >= sv('2019.10.17')
        except Exception:
            traceback.print_exc(file=sys.stdout)
            sys.exit(1)

    return accept

def roundtrips(data, decode, encode):
-    """helper to tests function that must do proper encode/decode roundtripping
-    """
+    """helper to tests function that must do proper encode/decode roundtripping"""

    @given(data)
    def testroundtrips(value):
        encoded = encode(value)
        decoded = decode(encoded)
        if decoded != value:
            raise ValueError(
                "Round trip failed: %s(%r) -> %s(%r) -> %r"

    print("Round trip OK")

# strategy for generating bytestring that might be an issue for Mercurial
bytestrings = (
    st.builds(
        lambda s, e: s.encode(e),
        st.text(),
-        st.sampled_from(['utf-8', 'utf-16',]),
+        st.sampled_from(
+            [
+                'utf-8',
+                'utf-16',
+            ]
+        ),
    )
) | st.binary()
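For reference, the combined strategy above reads as "either text encoded as UTF-8/UTF-16, or raw binary"; on hypothesis strategies the | operator is st.one_of(). A standalone sketch, assuming hypothesis is installed:

from hypothesis import strategies as st

bytestrings = st.builds(
    lambda s, e: s.encode(e),
    st.text(),
    st.sampled_from(['utf-8', 'utf-16']),
) | st.binary()

print(bytestrings.example())  # each draw picks one branch of the one_of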
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument(
        "--chg",
        action="store_true",
        help="install and use chg wrapper in place of hg",
    )
    hgconf.add_argument(
-        "--chg-debug", action="store_true", help="show chg debug logs",
+        "--chg-debug",
+        action="store_true",
+        help="show chg debug logs",
    )
    hgconf.add_argument("--compiler", help="compiler to build with")
    hgconf.add_argument(
        '--extra-config-opt',
        action="append",
        default=[],
        help='set the given config opt in the test hgrc',
    )
"""Tasks to perform after run().""" | """Tasks to perform after run().""" | ||||
for entry in self._daemonpids: | for entry in self._daemonpids: | ||||
killdaemons(entry) | killdaemons(entry) | ||||
self._daemonpids = [] | self._daemonpids = [] | ||||
if self._keeptmpdir: | if self._keeptmpdir: | ||||
log( | log( | ||||
'\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' | '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' | ||||
% (_bytes2sys(self._testtmp), _bytes2sys(self._threadtmp),) | % ( | ||||
_bytes2sys(self._testtmp), | |||||
_bytes2sys(self._threadtmp), | |||||
) | |||||
) | ) | ||||
else: | else: | ||||
try: | try: | ||||
shutil.rmtree(self._testtmp) | shutil.rmtree(self._testtmp) | ||||
except OSError: | except OSError: | ||||
# unreadable directory may be left in $TESTTMP; fix permission | # unreadable directory may be left in $TESTTMP; fix permission | ||||
# and try again | # and try again | ||||
makecleanable(self._testtmp) | makecleanable(self._testtmp) | ||||
        if os.altsep:
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True

    @staticmethod
    def parsehghaveoutput(lines):
-        '''Parse hghave log lines.
+        """Parse hghave log lines.

        Return tuple of lists (missing, failed):
          * the missing/unknown features
-          * the features for which existence check failed'''
+          * the features for which existence check failed"""
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else:  # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
-        """ Can be overriden by custom TestResult
-        """
+        """Can be overriden by custom TestResult"""

    def onEnd(self):
-        """ Can be overriden by custom TestResult
-        """
+        """Can be overriden by custom TestResult"""

    def addFailure(self, test, reason):
        self.failures.append((test, reason))
        if self._options.first:
            self.stop()
        else:
            with iolock:
            'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
        )

        vlog("# Using TESTDIR", _bytes2sys(self._testdir))
        vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
        vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
        vlog("# Using PATH", os.environ["PATH"])
        vlog(
-            "# Using", _bytes2sys(IMPL_PATH), _bytes2sys(osenvironb[IMPL_PATH]),
+            "# Using",
+            _bytes2sys(IMPL_PATH),
+            _bytes2sys(osenvironb[IMPL_PATH]),
        )
        vlog("# Writing to directory", _bytes2sys(self._outputdir))

        try:
            return self._runtests(testdescs) or 0
        finally:
            time.sleep(0.1)
            self._cleanup()
# replace a single chunk
testfilefixup(case0, b'', [b'', b''])
testfilefixup(case0, b'2', [b'', b'2'])
testfilefixup(case0, b'22', [b'', b'22'])
testfilefixup(case0, b'222', [b'', b'222'])

# input case 1: 3 lines, each commit adds one line
-case1 = buildcontents([(b'1', [1, 2, 3]), (b'2', [2, 3]), (b'3', [3]),])
+case1 = buildcontents(
+    [
+        (b'1', [1, 2, 3]),
+        (b'2', [2, 3]),
+        (b'3', [3]),
+    ]
+)

# 1:1 line mapping
testfilefixup(case1, b'123', case1)
testfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c'])
testfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3'])
testfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc'])
testfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23'])
testfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c'])
testfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c'])
testfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123'])

# (confusing) insertions
testfilefixup(case1, b'1a23', case1)
testfilefixup(case1, b'12b3', case1)

# input case 2: delete in the middle
-case2 = buildcontents([(b'11', [1, 2]), (b'22', [1]), (b'33', [1, 2]),])
+case2 = buildcontents(
+    [
+        (b'11', [1, 2]),
+        (b'22', [1]),
+        (b'33', [1, 2]),
+    ]
+)

# deletion (optimize code should make it 2 chunks)
testfilefixup(
    case2, b'', [b'', b'22', b''], fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)]
)

# 1:1 line mapping
testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])

# non 1:1 edits
# note: unlike case0, the chunk is not "continuous" and no edit allowed
testfilefixup(case2, b'aaa', case2)

# input case 3: rev 3 reverts rev 2
-case3 = buildcontents([(b'1', [1, 2, 3]), (b'2', [2]), (b'3', [1, 2, 3]),])
+case3 = buildcontents(
+    [
+        (b'1', [1, 2, 3]),
+        (b'2', [2]),
+        (b'3', [1, 2, 3]),
+    ]
+)

# 1:1 line mapping
testfilefixup(case3, b'13', case3)
testfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b'])
testfilefixup(case3, b'a3', [b'', b'a3', b'a23', b'a3'])
testfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab'])

# non 1:1 edits
testfilefixup(case3, b'a', case3)
testfilefixup(case3, b'abc', case3)

# deletion
testfilefixup(case3, b'', [b'', b'', b'2', b''])

# insertion
testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])

# input case 4: a slightly complex case
case4 = buildcontents(
    [
        (b'1', [1, 2, 3]),
        (b'2', [2, 3]),
-        (b'3', [1, 2,]),
+        (
+            b'3',
+            [
+                1,
+                2,
+            ],
+        ),
        (b'4', [1, 3]),
        (b'5', [3]),
        (b'6', [2, 3]),
        (b'7', [2]),
        (b'8', [2, 3]),
        (b'9', [3]),
    ]
)

testfilefixup(case4, b'1245689', case4)
testfilefixup(case4, b'1a2456bbb', case4)
testfilefixup(case4, b'1abc5689', case4)
testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])
testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])
testfilefixup(case4, b'aa2bcdd8ee', [b'', b'aa34', b'aa23678', b'aa24568ee'])
testfilefixup(case4, b'aaaaaa', case4)
testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])
testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])
testfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689'])
testfilefixup(case4, b'28', [b'', b'34', b'2378', b'28'])
testfilefixup(case4, b'', [b'', b'34', b'37', b''])

# input case 5: replace a small chunk which is near a deleted line
-case5 = buildcontents([(b'12', [1, 2]), (b'3', [1]), (b'4', [1, 2]),])
+case5 = buildcontents(
+    [
+        (b'12', [1, 2]),
+        (b'3', [1]),
+        (b'4', [1, 2]),
+    ]
+)

testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])

# input case 6: base "changeset" is immutable
case6 = [b'1357', b'0125678']
testfilefixup(case6, b'0125678', case6)
testfilefixup(case6, b'0a25678', case6)
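To make the cases above easier to read: buildcontents turns (line, revs-that-contain-it) pairs into per-revision file contents, with an empty base revision first. A hypothetical reimplementation that matches the expected lists in this test:

def buildcontents(linesrevs):
    maxrev = max(max(revs) for _, revs in linesrevs)
    contents = [b'']  # empty base "changeset"
    for rev in range(1, maxrev + 1):
        contents.append(b''.join(l for l, revs in linesrevs if rev in revs))
    return contents

print(buildcontents([(b'1', [1, 2, 3]), (b'2', [2, 3]), (b'3', [3])]))
# [b'', b'1', b'12', b'123']  == case1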
)

if pycompat.ispy3:
    long = int
    xrange = range

def buildgraph(rng, nodes=100, rootprob=0.05, mergeprob=0.2, prevprob=0.7):
-    '''nodes: total number of nodes in the graph
+    """nodes: total number of nodes in the graph
    rootprob: probability that a new node (not 0) will be a root
    mergeprob: probability that, excluding a root a node will be a merge
    prevprob: probability that p1 will be the previous node

    return value is a graph represented as an adjacency list.
-    '''
+    """
    graph = [None] * nodes
    for i in xrange(nodes):
        if i == 0 or rng.random() < rootprob:
            graph[i] = [nullrev]
        elif i == 1:
            graph[i] = [0]
        elif rng.random() < mergeprob:
            if i == 2 or rng.random() < prevprob:
            )
        ):
            print("%% removeancestorsfrom(), example %d" % (i + 1))
            missanc = ancestor.incrementalmissingancestors(graph.get, bases)
            missanc.removeancestorsfrom(revs)
            print("remaining (sorted): %s" % sorted(list(revs)))

    for i, (bases, revs) in enumerate(
-        (({10}, {11}), ({11}, {10}), ({7}, {9, 11}),)
+        (
+            ({10}, {11}),
+            ({11}, {10}),
+            ({7}, {9, 11}),
+        )
    ):
        print("%% missingancestors(), example %d" % (i + 1))
        missanc = ancestor.incrementalmissingancestors(graph.get, bases)
        print("return %s" % missanc.missingancestors(revs))

    print("% removeancestorsfrom(), bigger graph")
    vecgraph = [
        [-1, -1],
    def hello(self):
        return b"Ready."

# equivalent of localrepo.localrepository
class localthing(thing):
    def foo(self, one, two=None):
        if one:
-            return b"%s and %s" % (one, two,)
+            return b"%s and %s" % (
+                one,
+                two,
+            )
        return b"Nope"

    def bar(self, b, a):
-        return b"%s und %s" % (b, a,)
+        return b"%s und %s" % (
+            b,
+            a,
+        )

    def greet(self, name=None):
        return b"Hello, %s" % name

    @contextlib.contextmanager
    def commandexecutor(self):
        e = localrepo.localcommandexecutor(self)
        try:
        return self.server.perform(req)

    def _submitbatch(self, cmds):
        req = []
        for name, args in cmds:
            args = b','.join(n + b'=' + escapearg(v) for n, v in args)
            req.append(name + b':' + args)
        req = b';'.join(req)
-        res = self._submitone(b'batch', [(b'cmds', req,)])
+        res = self._submitone(
+            b'batch',
+            [
+                (
+                    b'cmds',
+                    req,
+                )
+            ],
+        )
        for r in res.split(b';'):
            yield r

    @contextlib.contextmanager
    def commandexecutor(self):
        e = wireprotov1peer.peerexecutor(self)
        try:
            yield e
        finally:
            e.close()

    @wireprotov1peer.batchable
    def foo(self, one, two=None):
-        encargs = [(b'one', mangle(one),), (b'two', mangle(two),)]
+        encargs = [
+            (
+                b'one',
+                mangle(one),
+            ),
+            (
+                b'two',
+                mangle(two),
+            ),
+        ]
        encresref = wireprotov1peer.future()
        yield encargs, encresref
        yield unmangle(encresref.value)

    @wireprotov1peer.batchable
    def bar(self, b, a):
        encresref = wireprotov1peer.future()
-        yield [(b'b', mangle(b),), (b'a', mangle(a),)], encresref
+        yield [
+            (
+                b'b',
+                mangle(b),
+            ),
+            (
+                b'a',
+                mangle(a),
+            ),
+        ], encresref
        yield unmangle(encresref.value)

    # greet is coded directly. It therefore does not support batching. If it
    # does appear in a batch, the batch is split around greet, and the call to
    # greet is done in its own roundtrip.
    def greet(self, name=None):
-        return unmangle(self._submitone(b'greet', [(b'name', mangle(name),)]))
+        return unmangle(
+            self._submitone(
+                b'greet',
+                [
+                    (
+                        b'name',
+                        mangle(name),
+                    )
+                ],
+            )
+        )

# demo remote usage
myproxy = remotething(myserver)
print()
bprint(b"== Remote")
use(myproxy)
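The wire format _submitbatch builds above is easy to reproduce in isolation (a sketch; the real escapearg escaping is omitted here):

def encodebatch(cmds):
    req = []
    for name, args in cmds:
        req.append(name + b':' + b','.join(n + b'=' + v for n, v in args))
    return b';'.join(req)

print(encodebatch([(b'foo', [(b'one', b'1')]), (b'greet', [(b'name', b'hg')])]))
# b'foo:one=1;greet:name=hg'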
        ):
            cborutil.decodeall(b'\x9f\xff')

    def testfromiter1(self):
        source = [b'foo']

        self.assertEqual(
            list(cborutil.streamencodearrayfromiter(source)),
-            [b'\x9f', b'\x43', b'foo', b'\xff',],
+            [
+                b'\x9f',
+                b'\x43',
+                b'foo',
+                b'\xff',
+            ],
        )

        dest = b''.join(cborutil.streamencodearrayfromiter(source))
        self.assertEqual(cbor.loads(dest), source)
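The expected chunks in this test decode by hand (initial-byte values per the CBOR spec, RFC 8949):

chunks = [b'\x9f', b'\x43', b'foo', b'\xff']
stream = b''.join(chunks)
assert stream[0] == 0x9F           # indefinite-length array start
assert stream[1] & 0xE0 == 0x40    # major type 2: byte string
assert stream[1] & 0x1F == 3       # length 3 -> the b'foo' payload
assert stream[-1] == 0xFF          # "break": terminates the array
print('framing checks out')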
        with self.assertRaisesRegex(
            cborutil.CBORDecodeError, 'indefinite length uint not allowed'
        ):

            'indefinite length bytestrings not ' 'allowed as array values',
        ):
            cborutil.decodeall(encoded)

class SetTests(TestCase):
    def testempty(self):
        self.assertEqual(
-            list(cborutil.streamencode(set())), [b'\xd9\x01\x02', b'\x80',]
+            list(cborutil.streamencode(set())),
+            [
+                b'\xd9\x01\x02',
+                b'\x80',
+            ],
        )
        self.assertEqual(cborutil.decodeall(b'\xd9\x01\x02\x80'), [set()])

    def testset(self):
        source = {b'foo', None, 42}
        encoded = b''.join(cborutil.streamencode(source))
with self.assertRaisesRegex( | with self.assertRaisesRegex( | ||||
cborutil.CBORDecodeError, | cborutil.CBORDecodeError, | ||||
'indefinite length bytestrings not ' 'allowed as set values', | 'indefinite length bytestrings not ' 'allowed as set values', | ||||
): | ): | ||||
cborutil.decodeall(encoded) | cborutil.decodeall(encoded) | ||||
encoded = b''.join([b'\xd9\x01\x02', b'\x81', b'\x80',]) # empty array | encoded = b''.join( | ||||
[ | |||||
b'\xd9\x01\x02', | |||||
b'\x81', | |||||
b'\x80', | |||||
] | |||||
) # empty array | |||||
with self.assertRaisesRegex( | with self.assertRaisesRegex( | ||||
cborutil.CBORDecodeError, 'collections not allowed as set values' | cborutil.CBORDecodeError, 'collections not allowed as set values' | ||||
): | ): | ||||
cborutil.decodeall(encoded) | cborutil.decodeall(encoded) | ||||
encoded = b''.join([b'\xd9\x01\x02', b'\x81', b'\xa0',]) # empty map | encoded = b''.join( | ||||
[ | |||||
b'\xd9\x01\x02', | |||||
b'\x81', | |||||
b'\xa0', | |||||
] | |||||
) # empty map | |||||
with self.assertRaisesRegex( | with self.assertRaisesRegex( | ||||
cborutil.CBORDecodeError, 'collections not allowed as set values' | cborutil.CBORDecodeError, 'collections not allowed as set values' | ||||
): | ): | ||||
cborutil.decodeall(encoded) | cborutil.decodeall(encoded) | ||||
encoded = b''.join( | encoded = b''.join( | ||||
[ | [ | ||||
        )
        with self.assertRaisesRegex(
            cborutil.CBORDecodeError,
            'indefinite length bytestrings not ' 'allowed as map keys',
        ):
            cborutil.decodeall(encoded)

-        encoded = b''.join([b'\xa1', b'\x80', b'\x43foo',])  # empty array
+        encoded = b''.join(
+            [
+                b'\xa1',
+                b'\x80',
+                b'\x43foo',
+            ]
+        )  # empty array
        with self.assertRaisesRegex(
            cborutil.CBORDecodeError, 'collections not supported as map keys'
        ):
            cborutil.decodeall(encoded)

    def testillegalvalue(self):
        encoded = b''.join(

class DecodeallTests(TestCase):
    def testemptyinput(self):
        self.assertEqual(cborutil.decodeall(b''), [])

    def testpartialinput(self):
        encoded = b''.join(
-            [b'\x82', b'\x01',]  # array of 2 elements # integer 1
+            [
+                b'\x82',
+                b'\x01',
+            ]  # array of 2 elements # integer 1
        )
        with self.assertRaisesRegex(
            cborutil.CBORDecodeError, 'input data not complete'
        ):
            cborutil.decodeall(encoded)

if __name__ == '__main__':
    import silenttestrunner

    silenttestrunner.main(__name__)
fileset = 'set:(**.py)'

cwd = os.path.dirname(os.environ["TESTDIR"])

if not os.path.isdir(os.path.join(cwd, ".hg")):
    sys.exit(0)

files = subprocess.check_output(
-    "hg files --print0 \"%s\"" % fileset, shell=True, cwd=cwd,
+    "hg files --print0 \"%s\"" % fileset,
+    shell=True,
+    cwd=cwd,
).split(b'\0')

if sys.version_info[0] >= 3:
    cwd = os.fsencode(cwd)

mods_tested = set()
for f in files:
    if not f:
    )

    def testsimpleedits(self):
        ll = linelog.linelog()
        # Initial revision: add lines 0, 1, and 2
        ll.replacelines(1, 0, 0, 0, 3)
        self.assertEqual(
            [(l.rev, l.linenum) for l in ll.annotate(1)],
-            [(1, 0), (1, 1), (1, 2),],
+            [
+                (1, 0),
+                (1, 1),
+                (1, 2),
+            ],
        )

        # Replace line 1 with a new line
        ll.replacelines(2, 1, 2, 1, 2)
        self.assertEqual(
            [(l.rev, l.linenum) for l in ll.annotate(2)],
-            [(1, 0), (2, 1), (1, 2),],
+            [
+                (1, 0),
+                (2, 1),
+                (1, 2),
+            ],
        )

        # delete a line out of 2
        ll.replacelines(3, 1, 2, 0, 0)
        self.assertEqual(
-            [(l.rev, l.linenum) for l in ll.annotate(3)], [(1, 0), (1, 2),]
+            [(l.rev, l.linenum) for l in ll.annotate(3)],
+            [
+                (1, 0),
+                (1, 2),
+            ],
        )

        # annotation of 1 is unchanged
        self.assertEqual(
            [(l.rev, l.linenum) for l in ll.annotate(1)],
-            [(1, 0), (1, 1), (1, 2),],
+            [
+                (1, 0),
+                (1, 1),
+                (1, 2),
+            ],
        )

        ll.annotate(3)  # set internal state to revision 3
        start = ll.getoffset(0)
        end = ll.getoffset(1)
-        self.assertEqual(ll.getalllines(start, end), [(1, 0), (2, 1), (1, 1),])
-        self.assertEqual(ll.getalllines(), [(1, 0), (2, 1), (1, 1), (1, 2),])
+        self.assertEqual(
+            ll.getalllines(start, end),
+            [
+                (1, 0),
+                (2, 1),
+                (1, 1),
+            ],
+        )
+        self.assertEqual(
+            ll.getalllines(),
+            [
+                (1, 0),
+                (2, 1),
+                (1, 1),
+                (1, 2),
+            ],
+        )

    def testparseclinelogfile(self):
        # This data is what the replacements in testsimpleedits
        # produce when fed to the original linelog.c implementation.
        data = (
            b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
            b'\x00\x00\x00\x00\x00\x00\x00\x02'
            b'\x00\x00\x00\x05\x00\x00\x00\x06'
            b'\x00\x00\x00\x06\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x07'
            b'\x00\x00\x00\x06\x00\x00\x00\x02'
            b'\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\t\x00\x00\x00\t'
            b'\x00\x00\x00\x00\x00\x00\x00\x0c'
            b'\x00\x00\x00\x08\x00\x00\x00\x05'
            b'\x00\x00\x00\x06\x00\x00\x00\x01'
            b'\x00\x00\x00\x00\x00\x00\x00\x05'
            b'\x00\x00\x00\x0c\x00\x00\x00\x05'
            b'\x00\x00\x00\n\x00\x00\x00\x01'
            b'\x00\x00\x00\x00\x00\x00\x00\t'
        )
        llc = linelog.linelog.fromdata(data)
        self.assertEqual(
            [(l.rev, l.linenum) for l in llc.annotate(1)],
-            [(1, 0), (1, 1), (1, 2),],
+            [
+                (1, 0),
+                (1, 1),
+                (1, 2),
+            ],
        )
        self.assertEqual(
            [(l.rev, l.linenum) for l in llc.annotate(2)],
-            [(1, 0), (2, 1), (1, 2),],
+            [
+                (1, 0),
+                (2, 1),
+                (1, 2),
+            ],
        )
        self.assertEqual(
-            [(l.rev, l.linenum) for l in llc.annotate(3)], [(1, 0), (1, 2),]
+            [(l.rev, l.linenum) for l in llc.annotate(3)],
+            [
+                (1, 0),
+                (1, 2),
+            ],
        )
        # Check we emit the same bytecode.
        ll = linelog.linelog()
        # Initial revision: add lines 0, 1, and 2
        ll.replacelines(1, 0, 0, 0, 3)
        # Replace line 1 with a new line
        ll.replacelines(2, 1, 2, 1, 2)
        # delete a line out of 2
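A reading aid for the calls above, based on linelog's replacelines(rev, a1, a2, b1, b2) signature: lines [a1:a2) of the current annotation are replaced by lines [b1:b2) tagged with rev.

# replacelines(1, 0, 0, 0, 3): at rev 1, replace the empty range [0:0)
#     with three new lines -> annotate(1) == [(1, 0), (1, 1), (1, 2)]
# replacelines(2, 1, 2, 1, 2): at rev 2, replace line 1 with one new line
#     -> annotate(2) == [(1, 0), (2, 1), (1, 2)]
# replacelines(3, 1, 2, 0, 0): at rev 3, delete line 1
#     -> annotate(3) == [(1, 0), (1, 2)]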
    def postreleasefn(self, success):
        self._postreleasecalled = True

    def assertacquirecalled(self, called):
        self._testcase.assertEqual(
            self._acquirecalled,
            called,
            'expected acquire to be %s but was actually %s'
-            % (self._tocalled(called), self._tocalled(self._acquirecalled),),
+            % (
+                self._tocalled(called),
+                self._tocalled(self._acquirecalled),
+            ),
        )

    def resetacquirefn(self):
        self._acquirecalled = False

    def assertreleasecalled(self, called):
        self._testcase.assertEqual(
            self._releasecalled,
            called,
            'expected release to be %s but was actually %s'
-            % (self._tocalled(called), self._tocalled(self._releasecalled),),
+            % (
+                self._tocalled(called),
+                self._tocalled(self._releasecalled),
+            ),
        )

    def assertpostreleasecalled(self, called):
        self._testcase.assertEqual(
            self._postreleasecalled,
            called,
            'expected postrelease to be %s but was actually %s'
            % (
                self._tocalled(called),
                self._tocalled(self._postreleasecalled),
            ),
        )

    def assertlockexists(self, exists):
        actual = self.vfs.lexists(testlockname)
        self._testcase.assertEqual(
            actual,
            exists,
            'expected lock to %s but actually did %s'
-            % (self._toexists(exists), self._toexists(actual),),
+            % (
+                self._toexists(exists),
+                self._toexists(actual),
+            ),
        )

    def _tocalled(self, called):
        if called:
            return 'called'
        else:
            return 'not called'
HASH_1 = b'1' * 40
BIN_HASH_1 = binascii.unhexlify(HASH_1)
HASH_2 = b'f' * 40
BIN_HASH_2 = binascii.unhexlify(HASH_2)
HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
BIN_HASH_3 = binascii.unhexlify(HASH_3)
A_SHORT_MANIFEST = (
    b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n' b'foo\0%(hash1)s%(flag1)s\n'
-) % {b'hash1': HASH_1, b'flag1': b'', b'hash2': HASH_2, b'flag2': b'l',}
+) % {
+    b'hash1': HASH_1,
+    b'flag1': b'',
+    b'hash2': HASH_2,
+    b'flag2': b'l',
+}
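For context, each manifest entry is "<path>\0<40-hex nodeid><flag>\n", where the flag is empty (regular file), 'l' (symlink), or 'x' (executable); hence the %(hash)s/%(flag)s templating above. A quick parse:

line = b'bar/baz/qux.py\x00' + b'f' * 40 + b'l'
path, rest = line.split(b'\x00')
node, flags = rest[:40], rest[40:]
print(path, node[:4], flags)  # b'bar/baz/qux.py' b'ffff' b'l'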
A_DEEPER_MANIFEST = (
    b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
    b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
    b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
    b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
    b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
    b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
self.assertIn('Manifest did not end in a newline.', str(v)) | self.assertIn('Manifest did not end in a newline.', str(v)) | ||||
def testHugeManifest(self): | def testHugeManifest(self): | ||||
m = self.parsemanifest(A_HUGE_MANIFEST) | m = self.parsemanifest(A_HUGE_MANIFEST) | ||||
self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m)) | self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m)) | ||||
self.assertEqual(len(m), len(list(m))) | self.assertEqual(len(m), len(list(m))) | ||||
def testMatchesMetadata(self): | def testMatchesMetadata(self): | ||||
'''Tests matches() for a few specific files to make sure that both | """Tests matches() for a few specific files to make sure that both | ||||
the set of files as well as their flags and nodeids are correct in | the set of files as well as their flags and nodeids are correct in | ||||
the resulting manifest.''' | the resulting manifest.""" | ||||
m = self.parsemanifest(A_HUGE_MANIFEST) | m = self.parsemanifest(A_HUGE_MANIFEST) | ||||
match = matchmod.exact([b'file1', b'file200', b'file300']) | match = matchmod.exact([b'file1', b'file200', b'file300']) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % ( | w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % ( | ||||
HASH_2, | HASH_2, | ||||
HASH_1, | HASH_1, | ||||
HASH_1, | HASH_1, | ||||
) | ) | ||||
self.assertEqual(w, m2.text()) | self.assertEqual(w, m2.text()) | ||||
def testMatchesNonexistentFile(self): | def testMatchesNonexistentFile(self): | ||||
'''Tests matches() for a small set of specific files, including one | """Tests matches() for a small set of specific files, including one | ||||
nonexistent file to make sure in only matches against existing files. | nonexistent file to make sure in only matches against existing files. | ||||
''' | """ | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.exact( | match = matchmod.exact( | ||||
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent'] | [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent'] | ||||
) | ) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual( | self.assertEqual( | ||||
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys() | [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys() | ||||
) | ) | ||||
def testMatchesNonexistentDirectory(self): | def testMatchesNonexistentDirectory(self): | ||||
'''Tests matches() for a relpath match on a directory that doesn't | """Tests matches() for a relpath match on a directory that doesn't | ||||
actually exist.''' | actually exist.""" | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.match( | match = matchmod.match( | ||||
util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath' | util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath' | ||||
) | ) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual([], m2.keys()) | self.assertEqual([], m2.keys()) | ||||
def testMatchesExactLarge(self): | def testMatchesExactLarge(self): | ||||
'''Tests matches() for files matching a large list of exact files. | """Tests matches() for files matching a large list of exact files.""" | ||||
''' | |||||
m = self.parsemanifest(A_HUGE_MANIFEST) | m = self.parsemanifest(A_HUGE_MANIFEST) | ||||
flist = m.keys()[80:300] | flist = m.keys()[80:300] | ||||
match = matchmod.exact(flist) | match = matchmod.exact(flist) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual(flist, m2.keys()) | self.assertEqual(flist, m2.keys()) | ||||
def testMatchesFull(self): | def testMatchesFull(self): | ||||
'''Tests matches() for what should be a full match.''' | '''Tests matches() for what should be a full match.''' | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.match(util.localpath(b'/repo'), b'', [b'']) | match = matchmod.match(util.localpath(b'/repo'), b'', [b'']) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual(m.keys(), m2.keys()) | self.assertEqual(m.keys(), m2.keys()) | ||||
def testMatchesDirectory(self): | def testMatchesDirectory(self): | ||||
'''Tests matches() on a relpath match on a directory, which should | """Tests matches() on a relpath match on a directory, which should | ||||
match against all files within said directory.''' | match against all files within said directory.""" | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.match( | match = matchmod.match( | ||||
util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath' | util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath' | ||||
) | ) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual( | self.assertEqual( | ||||
[ | [ | ||||
b'a/b/c/bar.py', | b'a/b/c/bar.py', | ||||
b'a/b/c/bar.txt', | b'a/b/c/bar.txt', | ||||
b'a/b/c/foo.py', | b'a/b/c/foo.py', | ||||
b'a/b/c/foo.txt', | b'a/b/c/foo.txt', | ||||
b'a/b/d/baz.py', | b'a/b/d/baz.py', | ||||
b'a/b/d/qux.py', | b'a/b/d/qux.py', | ||||
b'a/b/d/ten.txt', | b'a/b/d/ten.txt', | ||||
b'a/b/dog.py', | b'a/b/dog.py', | ||||
b'a/b/fish.py', | b'a/b/fish.py', | ||||
], | ], | ||||
m2.keys(), | m2.keys(), | ||||
) | ) | ||||
def testMatchesExactPath(self): | def testMatchesExactPath(self): | ||||
'''Tests matches() on an exact match on a directory, which should | """Tests matches() on an exact match on a directory, which should | ||||
result in an empty manifest because you can't perform an exact match | result in an empty manifest because you can't perform an exact match | ||||
against a directory.''' | against a directory.""" | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.exact([b'a/b']) | match = matchmod.exact([b'a/b']) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual([], m2.keys()) | self.assertEqual([], m2.keys()) | ||||
def testMatchesCwd(self): | def testMatchesCwd(self): | ||||
'''Tests matches() on a relpath match with the current directory ('.') | """Tests matches() on a relpath match with the current directory ('.') | ||||
when not in the root directory.''' | when not in the root directory.""" | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.match( | match = matchmod.match( | ||||
util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath' | util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath' | ||||
) | ) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual( | self.assertEqual( | ||||
[ | [ | ||||
b'a/b/c/bar.py', | b'a/b/c/bar.py', | ||||
b'a/b/c/bar.txt', | b'a/b/c/bar.txt', | ||||
b'a/b/c/foo.py', | b'a/b/c/foo.py', | ||||
b'a/b/c/foo.txt', | b'a/b/c/foo.txt', | ||||
b'a/b/d/baz.py', | b'a/b/d/baz.py', | ||||
b'a/b/d/qux.py', | b'a/b/d/qux.py', | ||||
b'a/b/d/ten.txt', | b'a/b/d/ten.txt', | ||||
b'a/b/dog.py', | b'a/b/dog.py', | ||||
b'a/b/fish.py', | b'a/b/fish.py', | ||||
], | ], | ||||
m2.keys(), | m2.keys(), | ||||
) | ) | ||||
def testMatchesWithPattern(self): | def testMatchesWithPattern(self): | ||||
'''Tests matches() for files matching a pattern that reside | """Tests matches() for files matching a pattern that reside | ||||
deeper than the specified directory.''' | deeper than the specified directory.""" | ||||
m = self.parsemanifest(A_DEEPER_MANIFEST) | m = self.parsemanifest(A_DEEPER_MANIFEST) | ||||
match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt']) | match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt']) | ||||
m2 = m._matches(match) | m2 = m._matches(match) | ||||
self.assertEqual( | self.assertEqual( | ||||
[b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys() | [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys() | ||||
) | ) |
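
The matcher flavors exercised above differ in how a path list is filtered: exact matchers hit only the named files (never directories), relpath matchers pull in everything beneath a directory, and glob patterns select by shell-style wildcards. A minimal pure-Python sketch of those semantics, for illustration only -- this is not Mercurial's matcher:

# Illustration only -- not Mercurial's matcher. fnmatch's '*' also crosses
# '/' (hg's glob does not), but for these sample paths the results coincide.
import fnmatch

def select(paths, kind, pat):
    if kind == 'exact':    # only the named files; a directory never matches
        wanted = set(pat)
        return [p for p in paths if p in wanted]
    if kind == 'relpath':  # a directory matches everything beneath it
        return [p for p in paths if p == pat or p.startswith(pat + '/')]
    if kind == 'glob':     # shell-style wildcard, e.g. 'a/b/*/*.txt'
        return [p for p in paths if fnmatch.fnmatch(p, pat)]
    raise ValueError(kind)

paths = ['a/b/c/bar.txt', 'a/b/c/foo.txt', 'a/b/d/ten.txt', 'a/b/dog.py']
assert select(paths, 'exact', ['a/b']) == []   # exact match against a dir
assert len(select(paths, 'relpath', 'a/b')) == 4
assert select(paths, 'glob', 'a/b/*/*.txt') == [
    'a/b/c/bar.txt', 'a/b/c/foo.txt', 'a/b/d/ten.txt']
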
self.assertEqual(m.visitchildrenset(b'dir'), b'this') | self.assertEqual(m.visitchildrenset(b'dir'), b'this') | ||||
self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all') | self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all') | ||||
# OPT: This should probably be 'all' if its parent is? | # OPT: This should probably be 'all' if its parent is? | ||||
self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') | self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') | ||||
self.assertEqual(m.visitchildrenset(b'folder'), set()) | self.assertEqual(m.visitchildrenset(b'folder'), set()) | ||||
def testVisitdirRootfilesin(self): | def testVisitdirRootfilesin(self): | ||||
m = matchmod.match( | m = matchmod.match( | ||||
util.localpath(b'/repo'), b'', patterns=[b'rootfilesin:dir/subdir'], | util.localpath(b'/repo'), | ||||
b'', | |||||
patterns=[b'rootfilesin:dir/subdir'], | |||||
) | ) | ||||
assert isinstance(m, matchmod.patternmatcher) | assert isinstance(m, matchmod.patternmatcher) | ||||
self.assertFalse(m.visitdir(b'dir/subdir/x')) | self.assertFalse(m.visitdir(b'dir/subdir/x')) | ||||
self.assertFalse(m.visitdir(b'folder')) | self.assertFalse(m.visitdir(b'folder')) | ||||
# FIXME: These should probably be True. | # FIXME: These should probably be True. | ||||
self.assertFalse(m.visitdir(b'')) | self.assertFalse(m.visitdir(b'')) | ||||
self.assertFalse(m.visitdir(b'dir')) | self.assertFalse(m.visitdir(b'dir')) | ||||
self.assertFalse(m.visitdir(b'dir/subdir')) | self.assertFalse(m.visitdir(b'dir/subdir')) | ||||
def testVisitchildrensetRootfilesin(self): | def testVisitchildrensetRootfilesin(self): | ||||
m = matchmod.match( | m = matchmod.match( | ||||
util.localpath(b'/repo'), b'', patterns=[b'rootfilesin:dir/subdir'], | util.localpath(b'/repo'), | ||||
b'', | |||||
patterns=[b'rootfilesin:dir/subdir'], | |||||
) | ) | ||||
assert isinstance(m, matchmod.patternmatcher) | assert isinstance(m, matchmod.patternmatcher) | ||||
self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set()) | self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set()) | ||||
self.assertEqual(m.visitchildrenset(b'folder'), set()) | self.assertEqual(m.visitchildrenset(b'folder'), set()) | ||||
# FIXME: These should probably be {'dir'}, {'subdir'} and 'this', | # FIXME: These should probably be {'dir'}, {'subdir'} and 'this', | ||||
# respectively, or at least 'this' for all three. | # respectively, or at least 'this' for all three. | ||||
self.assertEqual(m.visitchildrenset(b''), set()) | self.assertEqual(m.visitchildrenset(b''), set()) | ||||
self.assertEqual(m.visitchildrenset(b'dir'), set()) | self.assertEqual(m.visitchildrenset(b'dir'), set()) |
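
visitchildrenset() is a traversal-pruning hint: for a directory it answers 'all' (take everything below), 'this' (inspect this directory itself), a set of child names worth descending into, or an empty set (prune). A sketch of the answers the FIXME above calls for, assuming the matcher only needs files directly inside a known set of root directories; this illustrates the intended contract, not the current patternmatcher behavior:

# Sketch of the pruning answers the FIXME above calls for, assuming the
# matcher only needs files directly inside a known set of root directories.
def visitchildrenset(roots, dirname):
    if dirname in roots:
        return 'this'  # a walker must inspect this directory itself
    children = set()
    for root in roots:
        if dirname == '':
            children.add(root.split('/')[0])
        elif root.startswith(dirname + '/'):
            rest = root[len(dirname) + 1:]
            children.add(rest.split('/')[0])
    return children  # empty set == prune this subtree entirely

roots = {'dir/subdir'}
assert visitchildrenset(roots, '') == {'dir'}
assert visitchildrenset(roots, 'dir') == {'subdir'}
assert visitchildrenset(roots, 'dir/subdir') == 'this'
assert visitchildrenset(roots, 'folder') == set()
assert visitchildrenset(roots, 'dir/subdir/x') == set()
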
d = r | d = r | ||||
for j in c: | for j in c: | ||||
d = b''.join((d[:j], d[j : j + 1].upper(), d[j + 1 :])) | d = b''.join((d[:j], d[j : j + 1].upper(), d[j + 1 :])) | ||||
combos.add(d) | combos.add(d) | ||||
return sorted(combos) | return sorted(combos) | ||||
def buildprobtable(fp, cmd='hg manifest tip'): | def buildprobtable(fp, cmd='hg manifest tip'): | ||||
'''Construct and print a table of probabilities for path name | """Construct and print a table of probabilities for path name | ||||
components. The numbers are percentages.''' | components. The numbers are percentages.""" | ||||
counts = collections.defaultdict(lambda: 0) | counts = collections.defaultdict(lambda: 0) | ||||
for line in os.popen(cmd).read().splitlines(): | for line in os.popen(cmd).read().splitlines(): | ||||
if line[-2:] in ('.i', '.d'): | if line[-2:] in ('.i', '.d'): | ||||
line = line[:-2] | line = line[:-2] | ||||
if line.startswith('data/'): | if line.startswith('data/'): | ||||
line = line[5:] | line = line[5:] | ||||
for c in line: | for c in line: |
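
buildprobtable() tallies how often each character occurs across the path names and reports the tallies as percentages. A self-contained sketch of the same tally, assuming a plain list of path names instead of `hg manifest tip` output piped through os.popen:

import collections

def probtable(paths):
    counts = collections.defaultdict(int)
    for path in paths:
        for ch in path:
            counts[ch] += 1
    total = sum(counts.values())
    # report each character's share as a percentage, per the docstring above
    return {ch: 100.0 * n / total for ch, n in counts.items()}

table = probtable(['a/b/c/bar.txt', 'a/b/dog.py'])
assert abs(sum(table.values()) - 100.0) < 1e-9
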
if len(args) > 4: | if len(args) > 4: | ||||
meta = args[4] | meta = args[4] | ||||
packer.add(filename, node, base, content, metadata=meta) | packer.add(filename, node, base, content, metadata=meta) | ||||
path = packer.close() | path = packer.close() | ||||
return self.datapackreader(path) | return self.datapackreader(path) | ||||
def _testAddSingle(self, content): | def _testAddSingle(self, content): | ||||
"""Test putting a simple blob into a pack and reading it out. | """Test putting a simple blob into a pack and reading it out.""" | ||||
""" | |||||
filename = b"foo" | filename = b"foo" | ||||
node = self.getHash(content) | node = self.getHash(content) | ||||
revisions = [(filename, node, nullid, content)] | revisions = [(filename, node, nullid, content)] | ||||
pack = self.createPack(revisions) | pack = self.createPack(revisions) | ||||
if self.paramsavailable: | if self.paramsavailable: | ||||
self.assertEqual( | self.assertEqual( | ||||
pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX | pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX | ||||
for filename, node, base, content in revisions: | for filename, node, base, content in revisions: | ||||
entry = pack.getdelta(filename, node) | entry = pack.getdelta(filename, node) | ||||
self.assertEqual((content, filename, base, {}), entry) | self.assertEqual((content, filename, base, {}), entry) | ||||
chain = pack.getdeltachain(filename, node) | chain = pack.getdeltachain(filename, node) | ||||
self.assertEqual(content, chain[0][4]) | self.assertEqual(content, chain[0][4]) | ||||
def testAddDeltas(self): | def testAddDeltas(self): | ||||
"""Test putting multiple delta blobs into a pack and read the chain. | """Test putting multiple delta blobs into a pack and read the chain.""" | ||||
""" | |||||
revisions = [] | revisions = [] | ||||
filename = b"foo" | filename = b"foo" | ||||
lastnode = nullid | lastnode = nullid | ||||
for i in range(10): | for i in range(10): | ||||
content = b"abcdef%d" % i | content = b"abcdef%d" % i | ||||
node = self.getHash(content) | node = self.getHash(content) | ||||
revisions.append((filename, node, lastnode, content)) | revisions.append((filename, node, lastnode, content)) | ||||
lastnode = node | lastnode = node | ||||
pack = self.createPack(revisions) | pack = self.createPack(revisions) | ||||
entry = pack.getdelta(filename, revisions[0][1]) | entry = pack.getdelta(filename, revisions[0][1]) | ||||
realvalue = (revisions[0][3], filename, revisions[0][2], {}) | realvalue = (revisions[0][3], filename, revisions[0][2], {}) | ||||
self.assertEqual(entry, realvalue) | self.assertEqual(entry, realvalue) | ||||
# Test that the chain for the final entry has all the others | # Test that the chain for the final entry has all the others | ||||
chain = pack.getdeltachain(filename, node) | chain = pack.getdeltachain(filename, node) | ||||
for i in range(10): | for i in range(10): | ||||
content = b"abcdef%d" % i | content = b"abcdef%d" % i | ||||
self.assertEqual(content, chain[-i - 1][4]) | self.assertEqual(content, chain[-i - 1][4]) | ||||
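
Per the assertions above, getdeltachain() returns the chain ordered from the requested node (chain[0]) back to the full-text base (chain[-1]), with the payload at index 4 of each entry. A sketch of resolving such a chain to a full text; applydelta() here is a hypothetical stand-in for whatever applies the pack's delta format:

# Sketch of resolving a chain shaped like getdeltachain()'s result:
# chain[0] is the requested node, chain[-1] the full-text base, and each
# entry's payload sits at index 4. applydelta() is hypothetical.
def resolvechain(chain, applydelta):
    text = chain[-1][4]            # the chain bottoms out on a full text
    for entry in reversed(chain[:-1]):
        text = applydelta(text, entry[4])  # replay deltas toward the tip
    return text
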
def testPackMany(self): | def testPackMany(self): | ||||
"""Pack many related and unrelated objects. | """Pack many related and unrelated objects.""" | ||||
""" | |||||
# Build a random pack file | # Build a random pack file | ||||
revisions = [] | revisions = [] | ||||
blobs = {} | blobs = {} | ||||
random.seed(0) | random.seed(0) | ||||
for i in range(100): | for i in range(100): | ||||
filename = b"filename-%d" % i | filename = b"filename-%d" % i | ||||
filerevs = [] | filerevs = [] | ||||
for j in range(random.randint(1, 100)): | for j in range(random.randint(1, 100)): | ||||
for name, node, x, content, origmeta in revisions: | for name, node, x, content, origmeta in revisions: | ||||
parsedmeta = pack.getmeta(name, node) | parsedmeta = pack.getmeta(name, node) | ||||
# flag == 0 should be optimized out | # flag == 0 should be optimized out | ||||
if origmeta[constants.METAKEYFLAG] == 0: | if origmeta[constants.METAKEYFLAG] == 0: | ||||
del origmeta[constants.METAKEYFLAG] | del origmeta[constants.METAKEYFLAG] | ||||
self.assertEqual(parsedmeta, origmeta) | self.assertEqual(parsedmeta, origmeta) | ||||
def testGetMissing(self): | def testGetMissing(self): | ||||
"""Test the getmissing() api. | """Test the getmissing() api.""" | ||||
""" | |||||
revisions = [] | revisions = [] | ||||
filename = b"foo" | filename = b"foo" | ||||
lastnode = nullid | lastnode = nullid | ||||
for i in range(10): | for i in range(10): | ||||
content = b"abcdef%d" % i | content = b"abcdef%d" % i | ||||
node = self.getHash(content) | node = self.getHash(content) | ||||
revisions.append((filename, node, lastnode, content)) | revisions.append((filename, node, lastnode, content)) | ||||
lastnode = node | lastnode = node |
for filename, node, p1, p2, linknode, copyfrom in revisions: | for filename, node, p1, p2, linknode, copyfrom in revisions: | ||||
packer.add(filename, node, p1, p2, linknode, copyfrom) | packer.add(filename, node, p1, p2, linknode, copyfrom) | ||||
path = packer.close() | path = packer.close() | ||||
return historypack.historypack(path) | return historypack.historypack(path) | ||||
def testAddSingle(self): | def testAddSingle(self): | ||||
"""Test putting a single entry into a pack and reading it out. | """Test putting a single entry into a pack and reading it out.""" | ||||
""" | |||||
filename = b"foo" | filename = b"foo" | ||||
node = self.getFakeHash() | node = self.getFakeHash() | ||||
p1 = self.getFakeHash() | p1 = self.getFakeHash() | ||||
p2 = self.getFakeHash() | p2 = self.getFakeHash() | ||||
linknode = self.getFakeHash() | linknode = self.getFakeHash() | ||||
revisions = [(filename, node, p1, p2, linknode, None)] | revisions = [(filename, node, p1, p2, linknode, None)] | ||||
pack = self.createPack(revisions) | pack = self.createPack(revisions) | ||||
for filename, node, p1, p2, linknode, copyfrom in revisions: | for filename, node, p1, p2, linknode, copyfrom in revisions: | ||||
ap1, ap2, alinknode, acopyfrom = ancestors[node] | ap1, ap2, alinknode, acopyfrom = ancestors[node] | ||||
self.assertEqual(ap1, p1) | self.assertEqual(ap1, p1) | ||||
self.assertEqual(ap2, p2) | self.assertEqual(ap2, p2) | ||||
self.assertEqual(alinknode, linknode) | self.assertEqual(alinknode, linknode) | ||||
self.assertEqual(acopyfrom, copyfrom) | self.assertEqual(acopyfrom, copyfrom) | ||||
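
The entries checked above form an ancestry map of node -> (p1, p2, linknode, copyfrom). A sketch of walking such a map to collect every reachable ancestor of a node, assuming the usual all-zeros nullid terminates the walk:

# Sketch: walking an ancestry map shaped like the entries above, i.e.
# {node: (p1, p2, linknode, copyfrom)}. Not part of the historypack API.
nullid = b'\x00' * 20

def collectancestors(ancestrymap, start):
    seen = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node == nullid or node in seen or node not in ancestrymap:
            continue
        seen.add(node)
        p1, p2, _linknode, _copyfrom = ancestrymap[node]
        stack.extend([p1, p2])
    return seen
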
def testPackMany(self): | def testPackMany(self): | ||||
"""Pack many related and unrelated ancestors. | """Pack many related and unrelated ancestors.""" | ||||
""" | |||||
# Build a random pack file | # Build a random pack file | ||||
allentries = {} | allentries = {} | ||||
ancestorcounts = {} | ancestorcounts = {} | ||||
revisions = [] | revisions = [] | ||||
random.seed(0) | random.seed(0) | ||||
for i in range(100): | for i in range(100): | ||||
filename = b"filename-%d" % i | filename = b"filename-%d" % i | ||||
entries = [] | entries = [] | ||||
for filename, node, p1, p2, linknode, copyfrom in revisions: | for filename, node, p1, p2, linknode, copyfrom in revisions: | ||||
ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node) | ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node) | ||||
self.assertEqual(ap1, p1) | self.assertEqual(ap1, p1) | ||||
self.assertEqual(ap2, p2) | self.assertEqual(ap2, p2) | ||||
self.assertEqual(alinknode, linknode) | self.assertEqual(alinknode, linknode) | ||||
self.assertEqual(acopyfrom, copyfrom) | self.assertEqual(acopyfrom, copyfrom) | ||||
def testGetMissing(self): | def testGetMissing(self): | ||||
"""Test the getmissing() api. | """Test the getmissing() api.""" | ||||
""" | |||||
revisions = [] | revisions = [] | ||||
filename = b"foo" | filename = b"foo" | ||||
for i in range(10): | for i in range(10): | ||||
node = self.getFakeHash() | node = self.getFakeHash() | ||||
p1 = self.getFakeHash() | p1 = self.getFakeHash() | ||||
p2 = self.getFakeHash() | p2 = self.getFakeHash() | ||||
linknode = self.getFakeHash() | linknode = self.getFakeHash() | ||||
revisions.append((filename, node, p1, p2, linknode, None)) | revisions.append((filename, node, p1, p2, linknode, None)) |
def newrevlog(name=b'_testrevlog.i', recreate=False): | def newrevlog(name=b'_testrevlog.i', recreate=False): | ||||
if recreate: | if recreate: | ||||
tvfs.tryunlink(name) | tvfs.tryunlink(name) | ||||
rlog = revlog.revlog(tvfs, name) | rlog = revlog.revlog(tvfs, name) | ||||
return rlog | return rlog | ||||
def appendrev(rlog, text, tr, isext=False, isdelta=True): | def appendrev(rlog, text, tr, isext=False, isdelta=True): | ||||
'''Append a revision. If isext is True, set the EXTSTORED flag so flag | """Append a revision. If isext is True, set the EXTSTORED flag so flag | ||||
processor will be used (and rawtext is different from text). If isdelta is | processor will be used (and rawtext is different from text). If isdelta is | ||||
True, force the revision to be a delta, otherwise it's full text. | True, force the revision to be a delta, otherwise it's full text. | ||||
''' | """ | ||||
nextrev = len(rlog) | nextrev = len(rlog) | ||||
p1 = rlog.node(nextrev - 1) | p1 = rlog.node(nextrev - 1) | ||||
p2 = node.nullid | p2 = node.nullid | ||||
if isext: | if isext: | ||||
flags = revlog.REVIDX_EXTSTORED | flags = revlog.REVIDX_EXTSTORED | ||||
else: | else: | ||||
flags = revlog.REVIDX_DEFAULT_FLAGS | flags = revlog.REVIDX_DEFAULT_FLAGS | ||||
# Change storedeltachains temporarily, to override revlog's delta decision | # Change storedeltachains temporarily, to override revlog's delta decision | ||||
rlog._storedeltachains = isdelta | rlog._storedeltachains = isdelta | ||||
try: | try: | ||||
rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags) | rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags) | ||||
return nextrev | return nextrev | ||||
except Exception as ex: | except Exception as ex: | ||||
abort('rev %d: failed to append: %s' % (nextrev, ex)) | abort('rev %d: failed to append: %s' % (nextrev, ex)) | ||||
finally: | finally: | ||||
# Restore storedeltachains. It is always True, see revlog.__init__ | # Restore storedeltachains. It is always True, see revlog.__init__ | ||||
rlog._storedeltachains = True | rlog._storedeltachains = True | ||||
def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True): | def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True): | ||||
'''Copy revlog to destname using revlog.addgroup. Return the copied revlog. | """Copy revlog to destname using revlog.addgroup. Return the copied revlog. | ||||
This emulates push or pull, which use changegroups. Changegroup requires | This emulates push or pull, which use changegroups. Changegroup requires | ||||
a repo to work. We don't have a repo, so a dummy changegroup is used. | a repo to work. We don't have a repo, so a dummy changegroup is used. | ||||
If optimaldelta is True, use optimized delta parent, so the destination | If optimaldelta is True, use optimized delta parent, so the destination | ||||
revlog could probably reuse it. Otherwise it builds a sub-optimal delta, and | revlog could probably reuse it. Otherwise it builds a sub-optimal delta, and | ||||
the destination revlog needs more work to use it. | the destination revlog needs more work to use it. | ||||
This exercises some revlog.addgroup (and revlog._addrevision(text=None)) | This exercises some revlog.addgroup (and revlog._addrevision(text=None)) | ||||
code paths, which are not covered by "appendrev" alone. | code paths, which are not covered by "appendrev" alone. | ||||
''' | """ | ||||
class dummychangegroup(object): | class dummychangegroup(object): | ||||
@staticmethod | @staticmethod | ||||
def deltachunk(pnode): | def deltachunk(pnode): | ||||
pnode = pnode or node.nullid | pnode = pnode or node.nullid | ||||
parentrev = rlog.rev(pnode) | parentrev = rlog.rev(pnode) | ||||
r = parentrev + 1 | r = parentrev + 1 | ||||
if r >= len(rlog): | if r >= len(rlog): | ||||
dlog = newrevlog(destname, recreate=True) | dlog = newrevlog(destname, recreate=True) | ||||
dummydeltas = dummychangegroup().deltaiter() | dummydeltas = dummychangegroup().deltaiter() | ||||
dlog.addgroup(dummydeltas, linkmap, tr) | dlog.addgroup(dummydeltas, linkmap, tr) | ||||
return dlog | return dlog | ||||
def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'): | def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'): | ||||
'''Like addgroupcopy, but use the low level revlog._addrevision directly. | """Like addgroupcopy, but use the low level revlog._addrevision directly. | ||||
It exercises some code paths that are hard to reach otherwise. | It exercises some code paths that are hard to reach otherwise. | """ | ||||
''' | """ | ||||
dlog = newrevlog(destname, recreate=True) | dlog = newrevlog(destname, recreate=True) | ||||
for r in rlog: | for r in rlog: | ||||
p1 = rlog.node(r - 1) | p1 = rlog.node(r - 1) | ||||
p2 = node.nullid | p2 = node.nullid | ||||
if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED): | if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED): | ||||
text = rlog.rawdata(r) | text = rlog.rawdata(r) | ||||
cachedelta = None | cachedelta = None | ||||
else: | else: | ||||
ifh.close() | ifh.close() | ||||
return dlog | return dlog | ||||
# Utilities to generate revisions for testing | # Utilities to generate revisions for testing | ||||
def genbits(n): | def genbits(n): | ||||
'''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n). | """Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n). | ||||
i.e. the generated numbers have a width of n bits. | i.e. the generated numbers have a width of n bits. | ||||
The combination of two adjacent numbers will cover all possible cases. | The combination of two adjacent numbers will cover all possible cases. | ||||
That is to say, given any x, y where both x, and y are in range(2 ** n), | That is to say, given any x, y where both x, and y are in range(2 ** n), | ||||
there is an x followed immediately by y in the generated sequence. | there is an x followed immediately by y in the generated sequence. | ||||
''' | """ | ||||
m = 2 ** n | m = 2 ** n | ||||
# Gray Code. See https://en.wikipedia.org/wiki/Gray_code | # Gray Code. See https://en.wikipedia.org/wiki/Gray_code | ||||
gray = lambda x: x ^ (x >> 1) | gray = lambda x: x ^ (x >> 1) | ||||
reversegray = {gray(i): i for i in range(m)} | reversegray = {gray(i): i for i in range(m)} | ||||
# Generate (n * 2) bit gray code, yield lower n bits as X, and look for | # Generate (n * 2) bit gray code, yield lower n bits as X, and look for | ||||
# the next unused gray code where higher n bits equal to X. | # the next unused gray code where higher n bits equal to X. | ||||
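
The Gray-code construction itself is elided from this hunk, but genbits' contract (2 ** (n * 2) + 1 numbers in range(2 ** n), with every ordered pair of values appearing adjacently) can also be met with a textbook order-2 de Bruijn sequence. A sketch of that alternative, plainly not Mercurial's construction:

# Same contract as genbits(), met with a de Bruijn sequence of order 2
# over an alphabet of size 2 ** n. Not Mercurial's Gray-code construction.
def debruijnpairs(n):
    k = 2 ** n
    a = [0] * (2 * k)
    seq = []

    def db(t, p):
        if t > 2:
            if 2 % p == 0:
                seq.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            db(t + 1, p)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                db(t + 1, t)

    db(1, 1)
    seq.append(seq[0])  # wrap around so the final pair is covered too
    return seq

out = debruijnpairs(3)
assert len(out) == 2 ** 6 + 1
assert {(out[i], out[i + 1]) for i in range(len(out) - 1)} == {
    (x, y) for x in range(8) for y in range(8)
}

writecases() then peels the low three bits of each value into the (isdelta, isext, isempty) properties, so adjacent pairs cover all 64 transitions between revision shapes.
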
def gentext(rev): | def gentext(rev): | ||||
'''Given a revision number, generate dummy text''' | '''Given a revision number, generate dummy text''' | ||||
return b''.join(b'%d\n' % j for j in range(-1, rev % 5)) | return b''.join(b'%d\n' % j for j in range(-1, rev % 5)) | ||||
def writecases(rlog, tr): | def writecases(rlog, tr): | ||||
'''Write some revisions of interest to the test. | """Write some revisions of interest to the test. | ||||
The test is interested in 3 properties of a revision: | The test is interested in 3 properties of a revision: | ||||
- Is it a delta or a full text? (isdelta) | - Is it a delta or a full text? (isdelta) | ||||
This is to catch some delta application issues. | This is to catch some delta application issues. | ||||
- Does it have a flag of EXTSTORED? (isext) | - Does it have a flag of EXTSTORED? (isext) | ||||
This is to catch some flag processor issues, especially when | This is to catch some flag processor issues, especially when | ||||
they interact with revlog deltas. | they interact with revlog deltas. | ||||
(r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty) | (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty) | ||||
for r in range(len(rlog) - 1) | for r in range(len(rlog) - 1) | ||||
)) is 64. | )) is 64. | ||||
Where "r.delta", "r.ext", and "r.empty" are booleans matching properties | Where "r.delta", "r.ext", and "r.empty" are booleans matching properties | ||||
mentioned above. | mentioned above. | ||||
Return expected [(text, rawtext)]. | Return expected [(text, rawtext)]. | ||||
''' | """ | ||||
result = [] | result = [] | ||||
for i, x in enumerate(genbits(3)): | for i, x in enumerate(genbits(3)): | ||||
isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4) | isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4) | ||||
if isempty: | if isempty: | ||||
text = b'' | text = b'' | ||||
else: | else: | ||||
text = gentext(i) | text = gentext(i) | ||||
rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta) | rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta) |
# this would fail already without appropriate ancestor.__package__ | # this would fail already without appropriate ancestor.__package__ | ||||
from mercurial.rustext.ancestor import LazyAncestors | from mercurial.rustext.ancestor import LazyAncestors | ||||
from mercurial.testing import revlog as revlogtesting | from mercurial.testing import revlog as revlogtesting | ||||
@unittest.skipIf( | @unittest.skipIf( | ||||
rustext is None, "rustext module revlog relies on is not available", | rustext is None, | ||||
"rustext module revlog relies on is not available", | |||||
) | ) | ||||
class RustRevlogIndexTest(revlogtesting.RevlogBasedTestBase): | class RustRevlogIndexTest(revlogtesting.RevlogBasedTestBase): | ||||
def test_heads(self): | def test_heads(self): | ||||
idx = self.parseindex() | idx = self.parseindex() | ||||
rustidx = revlog.MixedIndex(idx) | rustidx = revlog.MixedIndex(idx) | ||||
self.assertEqual(rustidx.headrevs(), idx.headrevs()) | self.assertEqual(rustidx.headrevs(), idx.headrevs()) | ||||
def test_get_cindex(self): | def test_get_cindex(self): |
safetext = st.text( | safetext = st.text( | ||||
st.characters( | st.characters( | ||||
min_codepoint=1, max_codepoint=127, blacklist_categories=('Cc', 'Cs') | min_codepoint=1, max_codepoint=127, blacklist_categories=('Cc', 'Cs') | ||||
), | ), | ||||
min_size=1, | min_size=1, | ||||
).map(lambda s: s.encode('utf-8')) | ).map(lambda s: s.encode('utf-8')) | ||||
extensions = st.sampled_from(('shelve', 'mq', 'blackbox',)) | extensions = st.sampled_from( | ||||
( | |||||
'shelve', | |||||
'mq', | |||||
'blackbox', | |||||
) | |||||
) | |||||
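
safetext builds non-empty, printable-ASCII byte strings (codepoints 1..127, minus control and surrogate categories). A self-contained check of what the strategy yields, assuming the hypothesis package is importable; calling a @given-decorated function runs its body against many generated examples:

from hypothesis import given, strategies as st

safetext = st.text(
    st.characters(
        min_codepoint=1, max_codepoint=127, blacklist_categories=('Cc', 'Cs')
    ),
    min_size=1,
).map(lambda s: s.encode('utf-8'))

@given(safetext)
def check_ascii(value):
    # codepoints 1..127 encode to pure-ASCII, non-empty byte strings
    assert value
    assert value.decode('ascii')

check_ascii()
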
@contextmanager | @contextmanager | ||||
def acceptableerrors(*args): | def acceptableerrors(*args): | ||||
"""Sometimes we know an operation we're about to perform might fail, and | """Sometimes we know an operation we're about to perform might fail, and | ||||
we're OK with some of the failures. In those cases this may be used as a | we're OK with some of the failures. In those cases this may be used as a | ||||
context manager and will swallow expected failures, as identified by | context manager and will swallow expected failures, as identified by | ||||
substrings of the error message Mercurial emits.""" | substrings of the error message Mercurial emits.""" | ||||
"--config %s=" % (extensionconfigkey(ext),), | "--config %s=" % (extensionconfigkey(ext),), | ||||
"", | "", | ||||
) | ) | ||||
o.write(l + os.linesep) | o.write(l + os.linesep) | ||||
with open(tf, 'r') as r: | with open(tf, 'r') as r: | ||||
t = r.read() | t = r.read() | ||||
assert ext not in t, t | assert ext not in t, t | ||||
output = subprocess.check_output( | output = subprocess.check_output( | ||||
[runtests, tf, "--local",], stderr=subprocess.STDOUT | [ | ||||
runtests, | |||||
tf, | |||||
"--local", | |||||
], | |||||
stderr=subprocess.STDOUT, | |||||
) | ) | ||||
assert "Ran 1 test" in output, output | assert "Ran 1 test" in output, output | ||||
except subprocess.CalledProcessError as e: | except subprocess.CalledProcessError as e: | ||||
note(e.output) | note(e.output) | ||||
if self.failed or e is not None: | if self.failed or e is not None: | ||||
with open(savefile, "wb") as o: | with open(savefile, "wb") as o: | ||||
o.write(ttest) | o.write(ttest) | ||||
if e is not None: | if e is not None: | ||||
content=st.one_of( | content=st.one_of( | ||||
st.binary(), st.text().map(lambda x: x.encode('utf-8')) | st.binary(), st.text().map(lambda x: x.encode('utf-8')) | ||||
), | ), | ||||
) | ) | ||||
def gencontent(self, content): | def gencontent(self, content): | ||||
return content | return content | ||||
@rule( | @rule( | ||||
target=branches, name=safetext, | target=branches, | ||||
name=safetext, | |||||
) | ) | ||||
def genbranch(self, name): | def genbranch(self, name): | ||||
return name | return name | ||||
@rule(target=paths, source=paths) | @rule(target=paths, source=paths) | ||||
def lowerpath(self, source): | def lowerpath(self, source): | ||||
return source.lower() | return source.lower() | ||||
return | return | ||||
with open(path, 'wb') as o: | with open(path, 'wb') as o: | ||||
o.write(content) | o.write(content) | ||||
self.log.append( | self.log.append( | ||||
( | ( | ||||
"$ python -c 'import binascii; " | "$ python -c 'import binascii; " | ||||
"print(binascii.unhexlify(\"%s\"))' > %s" | "print(binascii.unhexlify(\"%s\"))' > %s" | ||||
) | ) | ||||
% (binascii.hexlify(content), pipes.quote(path),) | % ( | ||||
binascii.hexlify(content), | |||||
pipes.quote(path), | |||||
) | |||||
) | ) | ||||
@rule(path=paths) | @rule(path=paths) | ||||
def addpath(self, path): | def addpath(self, path): | ||||
if os.path.exists(path): | if os.path.exists(path): | ||||
self.hg("add", "--", path) | self.hg("add", "--", path) | ||||
@rule(path=paths) | @rule(path=paths) | ||||
def forgetpath(self, path): | def forgetpath(self, path): | ||||
if os.path.exists(path): | if os.path.exists(path): | ||||
with acceptableerrors("file is already untracked",): | with acceptableerrors( | ||||
"file is already untracked", | |||||
): | |||||
self.hg("forget", "--", path) | self.hg("forget", "--", path) | ||||
@rule(s=st.none() | st.integers(0, 100)) | @rule(s=st.none() | st.integers(0, 100)) | ||||
def addremove(self, s): | def addremove(self, s): | ||||
args = ["addremove"] | args = ["addremove"] | ||||
if s is not None: | if s is not None: | ||||
args.extend(["-s", str(s)]) | args.extend(["-s", str(s)]) | ||||
self.hg(*args) | self.hg(*args) | ||||
def currentrepo(self): | def currentrepo(self): | ||||
return os.path.basename(os.getcwd()) | return os.path.basename(os.getcwd()) | ||||
@property | @property | ||||
def config(self): | def config(self): | ||||
return self.configperrepo.setdefault(self.currentrepo, {}) | return self.configperrepo.setdefault(self.currentrepo, {}) | ||||
@rule( | @rule( | ||||
target=repos, source=repos, name=reponames, | target=repos, | ||||
source=repos, | |||||
name=reponames, | |||||
) | ) | ||||
def clone(self, source, name): | def clone(self, source, name): | ||||
if not os.path.exists(os.path.join("..", name)): | if not os.path.exists(os.path.join("..", name)): | ||||
self.cd("..") | self.cd("..") | ||||
self.hg("clone", source, name) | self.hg("clone", source, name) | ||||
self.cd(name) | self.cd(name) | ||||
return name | return name | ||||
@rule( | @rule( | ||||
target=repos, name=reponames, | target=repos, | ||||
name=reponames, | |||||
) | ) | ||||
def fresh(self, name): | def fresh(self, name): | ||||
if not os.path.exists(os.path.join("..", name)): | if not os.path.exists(os.path.join("..", name)): | ||||
self.cd("..") | self.cd("..") | ||||
self.mkdirp(name) | self.mkdirp(name) | ||||
self.cd(name) | self.cd(name) | ||||
self.hg("init") | self.hg("init") | ||||
return name | return name | ||||
@rule(name=repos) | @rule(name=repos) | ||||
def switch(self, name): | def switch(self, name): | ||||
self.cd(os.path.join("..", name)) | self.cd(os.path.join("..", name)) | ||||
assert self.currentrepo == name | assert self.currentrepo == name | ||||
assert os.path.exists(".hg") | assert os.path.exists(".hg") | ||||
@rule(target=repos) | @rule(target=repos) | ||||
def origin(self): | def origin(self): | ||||
return "repo1" | return "repo1" | ||||
@rule() | @rule() | ||||
def pull(self, repo=repos): | def pull(self, repo=repos): | ||||
with acceptableerrors( | with acceptableerrors( | ||||
"repository default not found", "repository is unrelated", | "repository default not found", | ||||
"repository is unrelated", | |||||
): | ): | ||||
self.hg("pull") | self.hg("pull") | ||||
@rule(newbranch=st.booleans()) | @rule(newbranch=st.booleans()) | ||||
def push(self, newbranch): | def push(self, newbranch): | ||||
with acceptableerrors( | with acceptableerrors( | ||||
"default repository not configured", "no changes found", | "default repository not configured", | ||||
"no changes found", | |||||
): | ): | ||||
if newbranch: | if newbranch: | ||||
self.hg("push", "--new-branch") | self.hg("push", "--new-branch") | ||||
else: | else: | ||||
with acceptableerrors("creates new branches"): | with acceptableerrors("creates new branches"): | ||||
self.hg("push") | self.hg("push") | ||||
# Section: Simple side effect free "check" operations | # Section: Simple side effect free "check" operations | ||||
'a branch of the same name already exists', | 'a branch of the same name already exists', | ||||
'is reserved', | 'is reserved', | ||||
): | ): | ||||
self.hg("branch", "--", branch) | self.hg("branch", "--", branch) | ||||
@rule(branch=branches, clean=st.booleans()) | @rule(branch=branches, clean=st.booleans()) | ||||
def update(self, branch, clean): | def update(self, branch, clean): | ||||
with acceptableerrors( | with acceptableerrors( | ||||
'unknown revision', 'parse error', | 'unknown revision', | ||||
'parse error', | |||||
): | ): | ||||
if clean: | if clean: | ||||
self.hg("update", "-C", "--", branch) | self.hg("update", "-C", "--", branch) | ||||
else: | else: | ||||
self.hg("update", "--", branch) | self.hg("update", "--", branch) | ||||
# Section: Extension management | # Section: Extension management | ||||
def hasextension(self, extension): | def hasextension(self, extension): | ||||
self.underlying.close() | self.underlying.close() | ||||
def extensionconfigkey(extension): | def extensionconfigkey(extension): | ||||
return "extensions." + extension | return "extensions." + extension | ||||
settings.register_profile( | settings.register_profile( | ||||
'default', settings(timeout=300, stateful_step_count=50, max_examples=10,) | 'default', | ||||
settings( | |||||
timeout=300, | |||||
stateful_step_count=50, | |||||
max_examples=10, | |||||
), | |||||
) | ) | ||||
settings.register_profile( | settings.register_profile( | ||||
'fast', | 'fast', | ||||
settings( | settings( | ||||
timeout=10, | timeout=10, | ||||
stateful_step_count=20, | stateful_step_count=20, | ||||
max_examples=5, | max_examples=5, |
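
Registered profiles are opt-in; a harness activates one with settings.load_profile. For example, keyed off an environment variable (the variable name here is an assumption, not something this file defines):

import os
from hypothesis import settings

settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'default'))
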
for f in meta[b'framegen']: | for f in meta[b'framegen']: | ||||
pass | pass | ||||
outstream = framing.outputstream(2) | outstream = framing.outputstream(2) | ||||
outstream.setencoder(globalui, b'zlib') | outstream.setencoder(globalui, b'zlib') | ||||
response1 = b''.join( | response1 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'status': b'ok', b'extra': b'response1' * 10,} | { | ||||
b'status': b'ok', | |||||
b'extra': b'response1' * 10, | |||||
} | |||||
) | ) | ||||
) | ) | ||||
response2 = b''.join( | response2 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'status': b'error', b'extra': b'response2' * 10,} | { | ||||
b'status': b'error', | |||||
b'extra': b'response2' * 10, | |||||
} | |||||
) | ) | ||||
) | ) | ||||
action, meta = sendframe( | action, meta = sendframe( | ||||
reactor, | reactor, | ||||
ffs( | ffs( | ||||
b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' | b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' | ||||
% request1.requestid | % request1.requestid | ||||
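
The frames above carry zlib-compressed payloads, and the receiver must be able to decode each response as soon as its frame arrives, which implies one long-lived compressor per stream with a flush after each payload. A conceptual sketch with the stdlib; the class shape and the Z_SYNC_FLUSH choice are assumptions, not Mercurial's framing code (the zstd-8mb variant below works the same way with a zstandard compressor):

import zlib

class ZlibStreamEncoder:
    def __init__(self):
        # one compressor for the whole stream, so state spans frames
        self._compressor = zlib.compressobj()

    def encode(self, data):
        # flush so this frame's payload is decodable on its own
        return self._compressor.compress(data) + self._compressor.flush(
            zlib.Z_SYNC_FLUSH
        )

enc = ZlibStreamEncoder()
frame1 = enc.encode(b'response1' * 10)
frame2 = enc.encode(b'response2' * 10)
dec = zlib.decompressobj()
assert dec.decompress(frame1) + dec.decompress(frame2) == (
    b'response1' * 10 + b'response2' * 10
)
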
for f in meta[b'framegen']: | for f in meta[b'framegen']: | ||||
pass | pass | ||||
outstream = framing.outputstream(2) | outstream = framing.outputstream(2) | ||||
outstream.setencoder(globalui, b'zstd-8mb') | outstream.setencoder(globalui, b'zstd-8mb') | ||||
response1 = b''.join( | response1 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'status': b'ok', b'extra': b'response1' * 10,} | { | ||||
b'status': b'ok', | |||||
b'extra': b'response1' * 10, | |||||
} | |||||
) | ) | ||||
) | ) | ||||
response2 = b''.join( | response2 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'status': b'error', b'extra': b'response2' * 10,} | { | ||||
b'status': b'error', | |||||
b'extra': b'response2' * 10, | |||||
} | |||||
) | ) | ||||
) | ) | ||||
action, meta = sendframe( | action, meta = sendframe( | ||||
reactor, | reactor, | ||||
ffs( | ffs( | ||||
b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' | b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' | ||||
% request1.requestid | % request1.requestid |
), | ), | ||||
], | ], | ||||
) | ) | ||||
def testtextoutput2simpleatoms(self): | def testtextoutput2simpleatoms(self): | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
val = list( | val = list( | ||||
framing.createtextoutputframe( | framing.createtextoutputframe( | ||||
stream, 1, [(b'foo', [], []), (b'bar', [], []),] | stream, | ||||
1, | |||||
[ | |||||
(b'foo', [], []), | |||||
(b'bar', [], []), | |||||
], | |||||
) | ) | ||||
) | ) | ||||
self.assertEqual( | self.assertEqual( | ||||
val, | val, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
b'1 1 stream-begin text-output 0 ' | b'1 1 stream-begin text-output 0 ' | ||||
b"cbor:[{b'msg': b'foo'}, {b'msg': b'bar'}]" | b"cbor:[{b'msg': b'foo'}, {b'msg': b'bar'}]" | ||||
) | ) | ||||
], | ], | ||||
) | ) | ||||
def testtextoutput1arg(self): | def testtextoutput1arg(self): | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
val = list( | val = list( | ||||
framing.createtextoutputframe( | framing.createtextoutputframe( | ||||
stream, 1, [(b'foo %s', [b'val1'], []),] | stream, | ||||
1, | |||||
[ | |||||
(b'foo %s', [b'val1'], []), | |||||
], | |||||
) | ) | ||||
) | ) | ||||
self.assertEqual( | self.assertEqual( | ||||
val, | val, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
b'1 1 stream-begin text-output 0 ' | b'1 1 stream-begin text-output 0 ' | ||||
b"cbor:[{b'msg': b'foo %s', b'args': [b'val1']}]" | b"cbor:[{b'msg': b'foo %s', b'args': [b'val1']}]" | ||||
) | ) | ||||
], | ], | ||||
) | ) | ||||
def testtextoutput2arg(self): | def testtextoutput2arg(self): | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
val = list( | val = list( | ||||
framing.createtextoutputframe( | framing.createtextoutputframe( | ||||
stream, 1, [(b'foo %s %s', [b'val', b'value'], []),] | stream, | ||||
1, | |||||
[ | |||||
(b'foo %s %s', [b'val', b'value'], []), | |||||
], | |||||
) | ) | ||||
) | ) | ||||
self.assertEqual( | self.assertEqual( | ||||
val, | val, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
b'1 1 stream-begin text-output 0 ' | b'1 1 stream-begin text-output 0 ' | ||||
b"cbor:[{b'msg': b'foo %s %s', b'args': [b'val', b'value']}]" | b"cbor:[{b'msg': b'foo %s %s', b'args': [b'val', b'value']}]" | ||||
) | ) | ||||
], | ], | ||||
) | ) | ||||
def testtextoutput1label(self): | def testtextoutput1label(self): | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
val = list( | val = list( | ||||
framing.createtextoutputframe( | framing.createtextoutputframe( | ||||
stream, 1, [(b'foo', [], [b'label']),] | stream, | ||||
1, | |||||
[ | |||||
(b'foo', [], [b'label']), | |||||
], | |||||
) | ) | ||||
) | ) | ||||
self.assertEqual( | self.assertEqual( | ||||
val, | val, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
b'1 1 stream-begin text-output 0 ' | b'1 1 stream-begin text-output 0 ' | ||||
b"cbor:[{b'msg': b'foo', b'labels': [b'label']}]" | b"cbor:[{b'msg': b'foo', b'labels': [b'label']}]" | ||||
) | ) | ||||
], | ], | ||||
) | ) | ||||
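
Reading the expected cbor: payloads across these tests, each (msg, args, labels) atom becomes a map that always carries b'msg' and carries b'args'/b'labels' only when non-empty. A sketch of that inferred mapping, with plain dicts standing in for the CBOR encoding:

def atomtomap(msg, args, labels):
    m = {b'msg': msg}
    if args:
        m[b'args'] = args
    if labels:
        m[b'labels'] = labels
    return m

assert atomtomap(b'foo', [], []) == {b'msg': b'foo'}
assert atomtomap(b'foo %s', [b'val1'], []) == {
    b'msg': b'foo %s', b'args': [b'val1']}
assert atomtomap(b'foo', [], [b'label']) == {
    b'msg': b'foo', b'labels': [b'label']}
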
def testargandlabel(self): | def testargandlabel(self): | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
val = list( | val = list( | ||||
framing.createtextoutputframe( | framing.createtextoutputframe( | ||||
stream, 1, [(b'foo %s', [b'arg'], [b'label']),] | stream, | ||||
1, | |||||
[ | |||||
(b'foo %s', [b'arg'], [b'label']), | |||||
], | |||||
) | ) | ||||
) | ) | ||||
self.assertEqual( | self.assertEqual( | ||||
val, | val, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
b'1 1 stream-begin text-output 0 ' | b'1 1 stream-begin text-output 0 ' |
results = list(sendframes(reactor, frames)) | results = list(sendframes(reactor, frames)) | ||||
self.assertaction(results[-1], b'runcommand') | self.assertaction(results[-1], b'runcommand') | ||||
self.assertEqual( | self.assertEqual( | ||||
results[-1][1], | results[-1][1], | ||||
{ | { | ||||
b'requestid': 1, | b'requestid': 1, | ||||
b'command': b'command', | b'command': b'command', | ||||
b'args': {b'key': b'val', b'foo': b'bar',}, | b'args': { | ||||
b'key': b'val', | |||||
b'foo': b'bar', | |||||
}, | |||||
b'redirect': None, | b'redirect': None, | ||||
b'data': b'value1value2', | b'data': b'value1value2', | ||||
}, | }, | ||||
) | ) | ||||
def testnewandcontinuation(self): | def testnewandcontinuation(self): | ||||
result = self._sendsingleframe( | result = self._sendsingleframe( | ||||
makereactor(), | makereactor(), | ||||
), | ), | ||||
], | ], | ||||
) | ) | ||||
) | ) | ||||
self.assertaction(results[0], b'wantframe') | self.assertaction(results[0], b'wantframe') | ||||
self.assertaction(results[1], b'error') | self.assertaction(results[1], b'error') | ||||
self.assertEqual( | self.assertEqual( | ||||
results[1][1], {b'message': b'request with ID 1 already received',} | results[1][1], | ||||
{ | |||||
b'message': b'request with ID 1 already received', | |||||
}, | |||||
) | ) | ||||
def testinterleavedcommands(self): | def testinterleavedcommands(self): | ||||
cbor1 = b''.join( | cbor1 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{ | { | ||||
b'name': b'command1', | b'name': b'command1', | ||||
b'args': {b'foo': b'bar', b'key1': b'val',}, | b'args': { | ||||
b'foo': b'bar', | |||||
b'key1': b'val', | |||||
}, | |||||
} | } | ||||
) | ) | ||||
) | ) | ||||
cbor3 = b''.join( | cbor3 = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{ | { | ||||
b'name': b'command3', | b'name': b'command3', | ||||
b'args': {b'biz': b'baz', b'key': b'val',}, | b'args': { | ||||
b'biz': b'baz', | |||||
b'key': b'val', | |||||
}, | |||||
} | } | ||||
) | ) | ||||
) | ) | ||||
results = list( | results = list( | ||||
sendframes( | sendframes( | ||||
makereactor(), | makereactor(), | ||||
[ | [ | ||||
), | ), | ||||
ffs(b'1 1 0 command-data 0 data'), | ffs(b'1 1 0 command-data 0 data'), | ||||
] | ] | ||||
results = list(sendframes(makereactor(), frames)) | results = list(sendframes(makereactor(), frames)) | ||||
self.assertEqual(len(results), 2) | self.assertEqual(len(results), 2) | ||||
self.assertaction(results[0], b'wantframe') | self.assertaction(results[0], b'wantframe') | ||||
self.assertaction(results[1], b'error') | self.assertaction(results[1], b'error') | ||||
self.assertEqual( | self.assertEqual( | ||||
results[1][1], {b'message': b'command data frame without flags',} | results[1][1], | ||||
{ | |||||
b'message': b'command data frame without flags', | |||||
}, | |||||
) | ) | ||||
def testframefornonreceivingrequest(self): | def testframefornonreceivingrequest(self): | ||||
"""Receiving a frame for a command that is not receiving is illegal.""" | """Receiving a frame for a command that is not receiving is illegal.""" | ||||
results = list( | results = list( | ||||
sendframes( | sendframes( | ||||
makereactor(), | makereactor(), | ||||
[ | [ | ||||
"""Receiving a request ID that matches a request that isn't finished.""" | """Receiving a request ID that matches a request that isn't finished.""" | ||||
reactor = makereactor() | reactor = makereactor() | ||||
stream = framing.stream(1) | stream = framing.stream(1) | ||||
list(sendcommandframes(reactor, stream, 1, b'command1', {})) | list(sendcommandframes(reactor, stream, 1, b'command1', {})) | ||||
results = list(sendcommandframes(reactor, stream, 1, b'command1', {})) | results = list(sendcommandframes(reactor, stream, 1, b'command1', {})) | ||||
self.assertaction(results[0], b'error') | self.assertaction(results[0], b'error') | ||||
self.assertEqual( | self.assertEqual( | ||||
results[0][1], {b'message': b'request with ID 1 is already active',} | results[0][1], | ||||
{ | |||||
b'message': b'request with ID 1 is already active', | |||||
}, | |||||
) | ) | ||||
def testduplicaterequestonactivecommandnosend(self): | def testduplicaterequestonactivecommandnosend(self): | ||||
"""Same as above but we've registered a response but haven't sent it.""" | """Same as above but we've registered a response but haven't sent it.""" | ||||
reactor = makereactor() | reactor = makereactor() | ||||
instream = framing.stream(1) | instream = framing.stream(1) | ||||
list(sendcommandframes(reactor, instream, 1, b'command1', {})) | list(sendcommandframes(reactor, instream, 1, b'command1', {})) | ||||
outstream = reactor.makeoutputstream() | outstream = reactor.makeoutputstream() | ||||
reactor.oncommandresponsereadyobjects(outstream, 1, [b'response']) | reactor.oncommandresponsereadyobjects(outstream, 1, [b'response']) | ||||
# We've registered the response but haven't sent it. From the | # We've registered the response but haven't sent it. From the | ||||
# perspective of the reactor, the command is still active. | # perspective of the reactor, the command is still active. | ||||
results = list(sendcommandframes(reactor, instream, 1, b'command1', {})) | results = list(sendcommandframes(reactor, instream, 1, b'command1', {})) | ||||
self.assertaction(results[0], b'error') | self.assertaction(results[0], b'error') | ||||
self.assertEqual( | self.assertEqual( | ||||
results[0][1], {b'message': b'request with ID 1 is already active',} | results[0][1], | ||||
{ | |||||
b'message': b'request with ID 1 is already active', | |||||
}, | |||||
) | ) | ||||
def testduplicaterequestaftersend(self): | def testduplicaterequestaftersend(self): | ||||
"""We can use a duplicate request ID after we've sent the response.""" | """We can use a duplicate request ID after we've sent the response.""" | ||||
reactor = makereactor() | reactor = makereactor() | ||||
instream = framing.stream(1) | instream = framing.stream(1) | ||||
list(sendcommandframes(reactor, instream, 1, b'command1', {})) | list(sendcommandframes(reactor, instream, 1, b'command1', {})) | ||||
outstream = reactor.makeoutputstream() | outstream = reactor.makeoutputstream() | ||||
reactor._sendersettings[b'contentencodings'], [b'a', b'b'] | reactor._sendersettings[b'contentencodings'], [b'a', b'b'] | ||||
) | ) | ||||
def testprotocolsettingsmultipleframes(self): | def testprotocolsettingsmultipleframes(self): | ||||
reactor = makereactor() | reactor = makereactor() | ||||
data = b''.join( | data = b''.join( | ||||
cborutil.streamencode( | cborutil.streamencode( | ||||
{b'contentencodings': [b'value1', b'value2'],} | { | ||||
b'contentencodings': [b'value1', b'value2'], | |||||
} | |||||
) | ) | ||||
) | ) | ||||
results = list( | results = list( | ||||
sendframes( | sendframes( | ||||
reactor, | reactor, | ||||
[ | [ | ||||
ffs( | ffs( | ||||
self.assertEqual(len(results), 1) | self.assertEqual(len(results), 1) | ||||
self.assertaction(results[0], b'runcommand') | self.assertaction(results[0], b'runcommand') | ||||
result = self._sendsingleframe( | result = self._sendsingleframe( | ||||
reactor, ffs(b'0 1 0 sender-protocol-settings eos ') | reactor, ffs(b'0 1 0 sender-protocol-settings eos ') | ||||
) | ) | ||||
self.assertaction(result, b'error') | self.assertaction(result, b'error') | ||||
self.assertEqual( | self.assertEqual( | ||||
result[1], {b'message': b'expected command request frame; got 8',} | result[1], | ||||
{ | |||||
b'message': b'expected command request frame; got 8', | |||||
}, | |||||
) | ) | ||||
if __name__ == '__main__': | if __name__ == '__main__': | ||||
import silenttestrunner | import silenttestrunner | ||||
silenttestrunner.main(__name__) | silenttestrunner.main(__name__) |
self.assertEqual(r.dispatchparts, []) | self.assertEqual(r.dispatchparts, []) | ||||
self.assertIsNone(r.dispatchpath) | self.assertIsNone(r.dispatchpath) | ||||
self.assertIsNone(r.reponame) | self.assertIsNone(r.reponame) | ||||
self.assertEqual(r.querystring, b'') | self.assertEqual(r.querystring, b'') | ||||
self.assertEqual(len(r.qsparams), 0) | self.assertEqual(len(r.qsparams), 0) | ||||
self.assertEqual(len(r.headers), 0) | self.assertEqual(len(r.headers), 0) | ||||
def testcustomport(self): | def testcustomport(self): | ||||
r = parse(DEFAULT_ENV, extra={'SERVER_PORT': '8000',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'SERVER_PORT': '8000', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver:8000') | self.assertEqual(r.url, b'http://testserver:8000') | ||||
self.assertEqual(r.baseurl, b'http://testserver:8000') | self.assertEqual(r.baseurl, b'http://testserver:8000') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver:8000') | self.assertEqual(r.advertisedurl, b'http://testserver:8000') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver:8000') | self.assertEqual(r.advertisedbaseurl, b'http://testserver:8000') | ||||
r = parse( | r = parse( | ||||
DEFAULT_ENV, | DEFAULT_ENV, | ||||
extra={'SERVER_PORT': '4000', 'wsgi.url_scheme': 'https',}, | extra={ | ||||
'SERVER_PORT': '4000', | |||||
'wsgi.url_scheme': 'https', | |||||
}, | |||||
) | ) | ||||
self.assertEqual(r.url, b'https://testserver:4000') | self.assertEqual(r.url, b'https://testserver:4000') | ||||
self.assertEqual(r.baseurl, b'https://testserver:4000') | self.assertEqual(r.baseurl, b'https://testserver:4000') | ||||
self.assertEqual(r.advertisedurl, b'https://testserver:4000') | self.assertEqual(r.advertisedurl, b'https://testserver:4000') | ||||
self.assertEqual(r.advertisedbaseurl, b'https://testserver:4000') | self.assertEqual(r.advertisedbaseurl, b'https://testserver:4000') | ||||
def testhttphost(self): | def testhttphost(self): | ||||
r = parse(DEFAULT_ENV, extra={'HTTP_HOST': 'altserver',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'HTTP_HOST': 'altserver', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://altserver') | self.assertEqual(r.url, b'http://altserver') | ||||
self.assertEqual(r.baseurl, b'http://altserver') | self.assertEqual(r.baseurl, b'http://altserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver') | self.assertEqual(r.advertisedurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
def testscriptname(self): | def testscriptname(self): | ||||
r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'SCRIPT_NAME': '', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver') | self.assertEqual(r.url, b'http://testserver') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver') | self.assertEqual(r.advertisedurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'') | self.assertEqual(r.apppath, b'') | ||||
self.assertEqual(r.dispatchparts, []) | self.assertEqual(r.dispatchparts, []) | ||||
self.assertIsNone(r.dispatchpath) | self.assertIsNone(r.dispatchpath) | ||||
r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '/script',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'SCRIPT_NAME': '/script', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver/script') | self.assertEqual(r.url, b'http://testserver/script') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver/script') | self.assertEqual(r.advertisedurl, b'http://testserver/script') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'/script') | self.assertEqual(r.apppath, b'/script') | ||||
self.assertEqual(r.dispatchparts, []) | self.assertEqual(r.dispatchparts, []) | ||||
self.assertIsNone(r.dispatchpath) | self.assertIsNone(r.dispatchpath) | ||||
r = parse(DEFAULT_ENV, extra={'SCRIPT_NAME': '/multiple words',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'SCRIPT_NAME': '/multiple words', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver/multiple%20words') | self.assertEqual(r.url, b'http://testserver/multiple%20words') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver/multiple%20words') | self.assertEqual(r.advertisedurl, b'http://testserver/multiple%20words') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'/multiple words') | self.assertEqual(r.apppath, b'/multiple words') | ||||
self.assertEqual(r.dispatchparts, []) | self.assertEqual(r.dispatchparts, []) | ||||
self.assertIsNone(r.dispatchpath) | self.assertIsNone(r.dispatchpath) | ||||
def testpathinfo(self): | def testpathinfo(self): | ||||
r = parse(DEFAULT_ENV, extra={'PATH_INFO': '',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'PATH_INFO': '', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver') | self.assertEqual(r.url, b'http://testserver') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver') | self.assertEqual(r.advertisedurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'') | self.assertEqual(r.apppath, b'') | ||||
self.assertEqual(r.dispatchparts, []) | self.assertEqual(r.dispatchparts, []) | ||||
self.assertEqual(r.dispatchpath, b'') | self.assertEqual(r.dispatchpath, b'') | ||||
r = parse(DEFAULT_ENV, extra={'PATH_INFO': '/pathinfo',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'PATH_INFO': '/pathinfo', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver/pathinfo') | self.assertEqual(r.url, b'http://testserver/pathinfo') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver/pathinfo') | self.assertEqual(r.advertisedurl, b'http://testserver/pathinfo') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'') | self.assertEqual(r.apppath, b'') | ||||
self.assertEqual(r.dispatchparts, [b'pathinfo']) | self.assertEqual(r.dispatchparts, [b'pathinfo']) | ||||
self.assertEqual(r.dispatchpath, b'pathinfo') | self.assertEqual(r.dispatchpath, b'pathinfo') | ||||
r = parse(DEFAULT_ENV, extra={'PATH_INFO': '/one/two/',}) | r = parse( | ||||
DEFAULT_ENV, | |||||
extra={ | |||||
'PATH_INFO': '/one/two/', | |||||
}, | |||||
) | |||||
self.assertEqual(r.url, b'http://testserver/one/two/') | self.assertEqual(r.url, b'http://testserver/one/two/') | ||||
self.assertEqual(r.baseurl, b'http://testserver') | self.assertEqual(r.baseurl, b'http://testserver') | ||||
self.assertEqual(r.advertisedurl, b'http://testserver/one/two/') | self.assertEqual(r.advertisedurl, b'http://testserver/one/two/') | ||||
self.assertEqual(r.advertisedbaseurl, b'http://testserver') | self.assertEqual(r.advertisedbaseurl, b'http://testserver') | ||||
self.assertEqual(r.apppath, b'') | self.assertEqual(r.apppath, b'') | ||||
self.assertEqual(r.dispatchparts, [b'one', b'two']) | self.assertEqual(r.dispatchparts, [b'one', b'two']) | ||||
self.assertEqual(r.dispatchpath, b'one/two') | self.assertEqual(r.dispatchpath, b'one/two') | ||||
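
The expectations in these parse() tests follow the PEP 3333 URL-reconstruction recipe: scheme from wsgi.url_scheme, authority from HTTP_HOST or else SERVER_NAME:SERVER_PORT (dropping default ports), and the path from the percent-quoted SCRIPT_NAME plus PATH_INFO; dispatchparts is then the slash-split remainder. A sketch of the recipe (str instead of bytes for brevity, query string omitted):

from urllib.parse import quote

def reconstructurl(environ):
    # PEP 3333 URL reconstruction, minus the query string
    scheme = environ['wsgi.url_scheme']
    if environ.get('HTTP_HOST'):
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        port = environ['SERVER_PORT']
        if (scheme, port) not in (('http', '80'), ('https', '443')):
            host += ':' + port
    path = quote(environ.get('SCRIPT_NAME', ''))
    path += quote(environ.get('PATH_INFO', ''))
    return '%s://%s%s' % (scheme, host, path)

env = {
    'wsgi.url_scheme': 'http',
    'SERVER_NAME': 'testserver',
    'SERVER_PORT': '8000',
    'SCRIPT_NAME': '/multiple words',
}
assert reconstructurl(env) == 'http://testserver:8000/multiple%20words'
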
     def testscriptandpathinfo(self):
         r = parse(
             DEFAULT_ENV,
-            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/pathinfo',},
+            extra={
+                'SCRIPT_NAME': '/script',
+                'PATH_INFO': '/pathinfo',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/script/pathinfo')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://testserver/script/pathinfo')
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
         self.assertEqual(r.apppath, b'/script')
         self.assertEqual(r.dispatchparts, [b'pathinfo'])

             parse(DEFAULT_ENV, reponame=b'repo')
         with self.assertRaisesRegex(
             error.ProgrammingError, 'PATH_INFO does not begin with repo ' 'name'
         ):
             parse(
                 DEFAULT_ENV,
                 reponame=b'repo',
-                extra={'PATH_INFO': '/pathinfo',},
+                extra={
+                    'PATH_INFO': '/pathinfo',
+                },
             )
         with self.assertRaisesRegex(
             error.ProgrammingError, 'reponame prefix of PATH_INFO'
         ):
             parse(
                 DEFAULT_ENV,
                 reponame=b'repo',
-                extra={'PATH_INFO': '/repoextra/path',},
+                extra={
+                    'PATH_INFO': '/repoextra/path',
+                },
             )
         r = parse(
             DEFAULT_ENV,
             reponame=b'repo',
-            extra={'PATH_INFO': '/repo/path1/path2',},
+            extra={
+                'PATH_INFO': '/repo/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://testserver/repo/path1/path2')
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')
         self.assertEqual(r.apppath, b'/repo')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertEqual(r.reponame, b'repo')
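The two ProgrammingError cases above pin down the contract being tested: reponame must be a leading path component of PATH_INFO, and only the remainder participates in dispatch. Restated as a sketch, with values copied from the assertions above (parse as imported by this test module):

    r = parse(
        DEFAULT_ENV,
        reponame=b'repo',
        extra={'PATH_INFO': '/repo/path1/path2'},
    )
    assert r.apppath == b'/repo'             # reponame is folded into the app path
    assert r.dispatchpath == b'path1/path2'  # only the remainder is dispatched
    assert r.reponame == b'repo'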
         r = parse(
             DEFAULT_ENV,
             reponame=b'prefix/repo',
-            extra={'PATH_INFO': '/prefix/repo/path1/path2',},
+            extra={
+                'PATH_INFO': '/prefix/repo/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/prefix/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(
             r.advertisedurl, b'http://testserver/prefix/repo/path1/path2'
         )
         self.assertEqual(r.advertisedbaseurl, b'http://testserver')

         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
         self.assertIsNone(r.reponame)

         # With only PATH_INFO defined.
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver',
-            extra={'PATH_INFO': '/path1/path2',},
+            extra={
+                'PATH_INFO': '/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])

         self.assertEqual(r.dispatchparts, [])
         self.assertIsNone(r.dispatchpath)
         self.assertIsNone(r.reponame)

         # PATH_INFO + path on alt URL.
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altpath',
-            extra={'PATH_INFO': '/path1/path2',},
+            extra={
+                'PATH_INFO': '/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(
             r.advertisedurl, b'http://altserver/altpath/path1/path2'
         )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altpath')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertIsNone(r.reponame)

         # PATH_INFO + path on alt URL with trailing slash.
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altpath/',
-            extra={'PATH_INFO': '/path1/path2',},
+            extra={
+                'PATH_INFO': '/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(
             r.advertisedurl, b'http://altserver/altpath//path1/path2'
         )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altpath/')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertIsNone(r.reponame)

         # Local SCRIPT_NAME is ignored.
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver',
-            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/path1/path2',},
+            extra={
+                'SCRIPT_NAME': '/script',
+                'PATH_INFO': '/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(r.advertisedurl, b'http://altserver/path1/path2')
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertIsNone(r.reponame)
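The altbaseurl cases all follow one rule: r.url and r.baseurl keep describing the physical request, while the advertised URLs are rebuilt from altbaseurl, whose path takes the place of any local SCRIPT_NAME. A sketch of the case above, with values copied from its assertions:

    r = parse(
        DEFAULT_ENV,
        altbaseurl=b'http://altserver',
        extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/path1/path2'},
    )
    assert r.url == b'http://testserver/script/path1/path2'    # physical URL
    assert r.advertisedurl == b'http://altserver/path1/path2'  # SCRIPT_NAME dropped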
         # Use remote's path for script name, app path
         r = parse(
             DEFAULT_ENV,
             altbaseurl=b'http://altserver/altroot',
-            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/path1/path2',},
+            extra={
+                'SCRIPT_NAME': '/script',
+                'PATH_INFO': '/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/script/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(
             r.advertisedurl, b'http://altserver/altroot/path1/path2'
         )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')
         self.assertEqual(r.urlscheme, b'http')
         self.assertEqual(r.apppath, b'/altroot')
         self.assertEqual(r.dispatchparts, [b'path1', b'path2'])
         self.assertEqual(r.dispatchpath, b'path1/path2')
         self.assertIsNone(r.reponame)

         # reponame is factored in properly.
         r = parse(
             DEFAULT_ENV,
             reponame=b'repo',
             altbaseurl=b'http://altserver/altroot',
-            extra={'SCRIPT_NAME': '/script', 'PATH_INFO': '/repo/path1/path2',},
+            extra={
+                'SCRIPT_NAME': '/script',
+                'PATH_INFO': '/repo/path1/path2',
+            },
         )
         self.assertEqual(r.url, b'http://testserver/script/repo/path1/path2')
         self.assertEqual(r.baseurl, b'http://testserver')
         self.assertEqual(
             r.advertisedurl, b'http://altserver/altroot/repo/path1/path2'
         )
         self.assertEqual(r.advertisedbaseurl, b'http://altserver')