merge with stable
author Matt Harbison <matt_harbison@yahoo.com>
Thu, 18 Mar 2021 18:24:59 -0400
changeset 46794 e2f7b2695ba1
parent 46782 6b52cffd8d0a (diff)
parent 46793 86b47ec1960a (current diff)
child 46795 048beb0167a7
mercurial/branchmap.py
mercurial/changegroup.py
mercurial/cmdutil.py
mercurial/commands.py
mercurial/commit.py
mercurial/context.py
mercurial/debugcommands.py
mercurial/dirstate.py
mercurial/error.py
mercurial/hg.py
mercurial/localrepo.py
mercurial/logcmdutil.py
mercurial/merge.py
mercurial/revlogutils/nodemap.py
mercurial/subrepo.py
mercurial/upgrade_utils/engine.py
tests/test-rebase-conflicts.t
--- a/Makefile	Sat Mar 13 02:09:23 2021 -0500
+++ b/Makefile	Thu Mar 18 18:24:59 2021 -0400
@@ -68,6 +68,12 @@
 build:
 	$(PYTHON) setup.py $(PURE) build $(COMPILERFLAG)
 
+build-chg:
+	make -C contrib/chg
+
+build-rhg:
+	(cd rust/rhg; cargo build --release)
+
 wheel:
 	FORCE_SETUPTOOLS=1 $(PYTHON) setup.py $(PURE) bdist_wheel $(COMPILERFLAG)
 
@@ -96,6 +102,9 @@
 install-bin: build
 	$(PYTHON) setup.py $(PURE) install --root="$(DESTDIR)/" --prefix="$(PREFIX)" --force
 
+install-chg: build-chg
+	make -C contrib/chg install PREFIX="$(PREFIX)"
+
 install-doc: doc
 	cd doc && $(MAKE) $(MFLAGS) install
 
@@ -107,6 +116,9 @@
 install-home-doc: doc
 	cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install
 
+install-rhg: build-rhg
+	install -m 755 rust/target/release/rhg "$(PREFIX)"/bin/
+
 MANIFEST-doc:
 	$(MAKE) -C doc MANIFEST
 
--- a/README.rst	Sat Mar 13 02:09:23 2021 -0500
+++ b/README.rst	Thu Mar 18 18:24:59 2021 -0400
@@ -18,3 +18,13 @@
 
 See https://mercurial-scm.org/ for detailed installation
 instructions, platform-specific notes, and Mercurial user information.
+
+Notes for packagers
+===================
+
+Mercurial ships a copy of the python-zstandard sources, which provides
+support for zstd compression and decompression. The module is not
+intended to be replaced by the standalone python-zstandard package, nor
+is it intended to use a system zstd library. Patching it to do either
+can result in hard-to-diagnose errors and is explicitly discouraged as
+an unsupported configuration.
--- a/black.toml	Sat Mar 13 02:09:23 2021 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-[tool.black]
-line-length = 80
-exclude = '''
-build/
-| wheelhouse/
-| dist/
-| packages/
-| \.hg/
-| \.mypy_cache/
-| \.venv/
-| mercurial/thirdparty/
-'''
-skip-string-normalization = true
-quiet = true
--- a/contrib/clang-format-ignorelist	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/clang-format-ignorelist	Thu Mar 18 18:24:59 2021 -0400
@@ -9,3 +9,4 @@
 hgext/fsmonitor/pywatchman/**.c
 mercurial/thirdparty/**.c
 mercurial/thirdparty/**.h
+mercurial/pythoncapi_compat.h
--- a/contrib/examples/fix.hgrc	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/examples/fix.hgrc	Thu Mar 18 18:24:59 2021 -0400
@@ -5,7 +5,7 @@
 rustfmt:command = rustfmt +nightly
 rustfmt:pattern = set:"**.rs" - "mercurial/thirdparty/**"
 
-black:command = black --config=black.toml -
+black:command = black
 black:pattern = set:**.py - mercurial/thirdparty/**
 
 # Mercurial doesn't have any Go code, but if we did this is how we
--- a/contrib/fuzz/Makefile	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/fuzz/Makefile	Thu Mar 18 18:24:59 2021 -0400
@@ -1,5 +1,5 @@
-CC = clang
-CXX = clang++
+CC ?= clang
+CXX ?= clang++
 
 # By default, use our own standalone_fuzz_target_runner.
 # This runner does no fuzzing, but simply executes the inputs
--- a/contrib/heptapod-ci.yml	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/heptapod-ci.yml	Thu Mar 18 18:24:59 2021 -0400
@@ -7,6 +7,8 @@
 variables:
     PYTHON: python
     TEST_HGMODULEPOLICY: "allow"
+    HG_CI_IMAGE_TAG: "latest"
+    TEST_HGTESTS_ALLOW_NETIO: "0"
 
 .runtests_template: &runtests
     stage: tests
@@ -17,21 +19,12 @@
       - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
       - cd /tmp/mercurial-ci/
       - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
+      - black --version
+      - clang-format --version
     script:
         - echo "python used, $PYTHON"
         - echo "$RUNTEST_ARGS"
-        - HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
-
-
-.rust_template: &rust
-    before_script:
-      - hg clone . /tmp/mercurial-ci/ --noupdate --config phases.publish=no
-      - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
-      - ls -1 tests/test-check-*.* > /tmp/check-tests.txt
-      - cd /tmp/mercurial-ci/rust/rhg
-      - cargo build
-      - cd /tmp/mercurial-ci/
-
+        - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" "$PYTHON" tests/run-tests.py --color=always $RUNTEST_ARGS
 
 checks-py2:
     <<: *runtests
@@ -66,6 +59,7 @@
     variables:
         RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
         TEST_HGMODULEPOLICY: "c"
+        TEST_HGTESTS_ALLOW_NETIO: "1"
 
 test-py3:
     <<: *runtests
@@ -73,6 +67,7 @@
         RUNTEST_ARGS: " --no-rust --blacklist /tmp/check-tests.txt"
         PYTHON: python3
         TEST_HGMODULEPOLICY: "c"
+        TEST_HGTESTS_ALLOW_NETIO: "1"
 
 test-py2-pure:
     <<: *runtests
@@ -89,7 +84,6 @@
 
 test-py2-rust:
     <<: *runtests
-    <<: *rust
     variables:
         HGWITHRUSTEXT: cpython
         RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
@@ -97,13 +91,20 @@
 
 test-py3-rust:
     <<: *runtests
-    <<: *rust
     variables:
         HGWITHRUSTEXT: cpython
         RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
         PYTHON: python3
         TEST_HGMODULEPOLICY: "rust+c"
 
+test-py3-rhg:
+    <<: *runtests
+    variables:
+        HGWITHRUSTEXT: cpython
+        RUNTEST_ARGS: "--rust --rhg --blacklist /tmp/check-tests.txt"
+        PYTHON: python3
+        TEST_HGMODULEPOLICY: "rust+c"
+
 test-py2-chg:
     <<: *runtests
     variables:
--- a/contrib/packaging/debian/control	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/packaging/debian/control	Thu Mar 18 18:24:59 2021 -0400
@@ -25,7 +25,9 @@
 Suggests: wish
 Replaces: mercurial-common
 Breaks: mercurial-common
+Provides: python3-mercurial
 Architecture: any
+Homepage: https://www.mercurial-scm.org/
 Description: fast, easy to use, distributed revision control tool.
  Mercurial is a fast, lightweight Source Control Management system designed
  for efficient handling of very large distributed projects.
--- a/contrib/packaging/debian/rules	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/packaging/debian/rules	Thu Mar 18 18:24:59 2021 -0400
@@ -18,6 +18,10 @@
 #   DEB_HG_PYTHON_VERSIONS="3.7 3.8" make deb
 DEB_HG_MULTI_VERSION?=0
 
+# Set to 1 to make /usr/bin/hg a symlink to chg, and move hg to
+# /usr/lib/mercurial/hg.
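+# For example, mirroring the DEB_HG_PYTHON_VERSIONS usage shown above:
+#   DEB_HG_CHG_BY_DEFAULT=1 make deb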
+DEB_HG_CHG_BY_DEFAULT?=0
+
 CPUS=$(shell cat /proc/cpuinfo | grep -E ^processor | wc -l)
 
 # By default, only build for the version of python3 that the system considers
@@ -40,6 +44,12 @@
 	DEB_HG_PYTHON_VERSIONS?=$(shell py3versions -vd)
 endif
 
+ifeq ($(DEB_HG_CHG_BY_DEFAULT), 1)
+	# Important: the "real" hg must have a 'basename' of 'hg'. Otherwise, hg
+	# behaves differently when setting $HG and breaks aliases that use that.
+	export HGPATH=/usr/lib/mercurial/hg
+endif
+
 export HGPYTHON3=1
 export PYTHON=python3
 
@@ -86,3 +96,8 @@
 	cp contrib/bash_completion "$(CURDIR)"/debian/mercurial/usr/share/bash-completion/completions/hg
 	mkdir -p "$(CURDIR)"/debian/mercurial/usr/share/zsh/vendor-completions
 	cp contrib/zsh_completion "$(CURDIR)"/debian/mercurial/usr/share/zsh/vendor-completions/_hg
+	if [[ "$(DEB_HG_CHG_BY_DEFAULT)" -eq 1 ]]; then \
+		mkdir -p "$(CURDIR)"/debian/mercurial/usr/lib/mercurial; \
+		mv "$(CURDIR)"/debian/mercurial/usr/bin/hg "$(CURDIR)"/debian/mercurial/usr/lib/mercurial/hg; \
+		ln -s chg "$(CURDIR)"/debian/mercurial/usr/bin/hg; \
+	fi
--- a/contrib/packaging/hgpackaging/util.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/packaging/hgpackaging/util.py	Thu Mar 18 18:24:59 2021 -0400
@@ -161,10 +161,10 @@
     >>> normalize_windows_version("5.3rc1")
     '5.3.0.1'
 
-    >>> normalize_windows_version("5.3rc1+2-abcdef")
+    >>> normalize_windows_version("5.3rc1+hg2.abcdef")
     '5.3.0.1'
 
-    >>> normalize_windows_version("5.3+2-abcdef")
+    >>> normalize_windows_version("5.3+hg2.abcdef")
     '5.3.0.2'
     """
     if '+' in version:
@@ -188,8 +188,8 @@
         if rc is not None:
             versions.append(rc)
         elif extra:
-            # <commit count>-<hash>+<date>
-            versions.append(int(extra.split('-')[0]))
+            # hg<commit count>.<hash>+<date>
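+            # e.g. extra == "hg2.abcdef": split -> "hg2", strip "hg" -> 2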
+            versions.append(int(extra.split('.')[0][2:]))
 
     return '.'.join('%d' % x for x in versions[0:4])
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/perf-utils/search-discovery-case	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+# Search for interesting discovery instance
+#
+#  search-discovery-case REPO [REPO]…
+#
+# This uses the subsetmaker extension (next to this script) to generate a
+# stream of random discovery instances. When interesting cases are discovered,
+# information about them is printed on stdout.
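+#
+# A hypothetical invocation (any repository paths work):
+#
+#   ./search-discovery-case ~/repos/repo-a ~/repos/repo-b
+#
+# Each interesting case is printed on stdout as:
+#
+#   REPO LEFT-CASE RIGHT-CASE ROUND-TRIPS UNDECIDED-COMMON UNDECIDED-MISSING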
+from __future__ import print_function
+
+import json
+import os
+import queue
+import random
+import signal
+import subprocess
+import sys
+import threading
+
+this_script = os.path.abspath(sys.argv[0])
+this_dir = os.path.dirname(this_script)
+hg_dir = os.path.join(this_dir, '..', '..')
+HG_REPO = os.path.normpath(hg_dir)
+HG_BIN = os.path.join(HG_REPO, 'hg')
+
+JOB = int(os.environ.get('NUMBER_OF_PROCESSORS', 8))
+
+
+SLICING = ('scratch', 'randomantichain', 'rev')
+
+
+def nb_revs(repo_path):
+    cmd = [
+        HG_BIN,
+        '--repository',
+        repo_path,
+        'log',
+        '--template',
+        '{rev}',
+        '--rev',
+        'tip',
+    ]
+    s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    out, err = s.communicate()
+    return int(out)
+
+
+repos = []
+for repo in sys.argv[1:]:
+    size = nb_revs(repo)
+    repos.append((repo, size))
+
+
+def pick_one(repo):
+    pick = random.choice(SLICING)
+    seed = random.randint(0, 100000)
+    if pick == 'scratch':
+        start = int(repo[1] * 0.3)
+        end = int(repo[1] * 0.7)
+        nb = random.randint(start, end)
+        return ('scratch', nb, seed)
+    elif pick == 'randomantichain':
+        return ('randomantichain', seed)
+    elif pick == 'rev':
+        start = int(repo[1] * 0.3)
+        end = int(repo[1])
+        rev = random.randint(start, end)
+        return ('rev', rev)
+    else:
+        assert False
+
+
+done = threading.Event()
+cases = queue.Queue(maxsize=10 * JOB)
+results = queue.Queue()
+
+
+def worker():
+    while not done.is_set():
+        c = cases.get()
+        if c is None:
+            return
+        try:
+            res = process(c)
+            results.put((c, res))
+        except Exception as exc:
+            print('processing-failed: %s %s' % (c, exc), file=sys.stderr)
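+        # run the mirrored case as well, swapping the local and remote subsets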
+        c = (c[0], c[2], c[1])
+        try:
+            res = process(c)
+            results.put((c, res))
+        except Exception as exc:
+            print('processing-failed: %s %s' % (c, exc), file=sys.stderr)
+
+
+SUBSET_PATH = os.path.join(HG_REPO, 'contrib', 'perf-utils', 'subsetmaker.py')
+
+
+CMD_BASE = (
+    HG_BIN,
+    'debugdiscovery',
+    '--template',
+    'json',
+    '--config',
+    'extensions.subset=%s' % SUBSET_PATH,
+)
+#    '--local-as-revs "$left" --local-as-revs "$right"'
+#    > /data/discovery-references/results/disco-mozilla-unified-$1-$2.txt
+#        )
+
+
+def to_revsets(case):
+    t = case[0]
+    if t == 'scratch':
+        return 'not scratch(all(), %d, "%d")' % (case[1], case[2])
+    elif t == 'randomantichain':
+        return '::randomantichain(all(), "%d")' % case[1]
+    elif t == 'rev':
+        return '::%d' % case[1]
+    else:
+        assert False
+
+
+def process(case):
+    (repo, left, right) = case
+    cmd = list(CMD_BASE)
+    cmd.append('-R')
+    cmd.append(repo[0])
+    cmd.append('--local-as-revs')
+    cmd.append(to_revsets(left))
+    cmd.append('--remote-as-revs')
+    cmd.append(to_revsets(right))
+    s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    out, err = s.communicate()
+    return json.loads(out)[0]
+
+
+def interesting_boundary(res):
+    """check if a case is interesting or not
+
+    For now we are mostly interested in cases where we do multiple round-trips
+    and where the boundary is somewhere in the middle of the undecided set.
+
+    Ideally, we would make this configurable, but this is not a focus for now.
+
+    return None or (round-trip, undecided-common, undecided-missing)
+    """
+    roundtrips = res["total-roundtrips"]
+    if roundtrips <= 1:
+        return None
+    undecided_common = res["nb-ini_und-common"]
+    undecided_missing = res["nb-ini_und-missing"]
+    if undecided_common == 0:
+        return None
+    if undecided_missing == 0:
+        return None
+    return (roundtrips, undecided_common, undecided_missing)
+
+
+def end(*args, **kwargs):
+    done.set()
+
+
+def format_case(case):
+    return '-'.join(str(s) for s in case)
+
+
+signal.signal(signal.SIGINT, end)
+
+for i in range(JOB):
+    threading.Thread(target=worker).start()
+
+nb_cases = 0
+while not done.is_set():
+    repo = random.choice(repos)
+    left = pick_one(repo)
+    right = pick_one(repo)
+    cases.put((repo, left, right))
+    while not results.empty():
+        # results has a single reader so this is fine
+        c, res = results.get_nowait()
+        boundary = interesting_boundary(res)
+        if boundary is not None:
+            print(c[0][0], format_case(c[1]), format_case(c[2]), *boundary)
+            sys.stdout.flush()
+
+    nb_cases += 1
+    if not nb_cases % 100:
+        print('[%d cases generated]' % nb_cases, file=sys.stderr)
+
+for i in range(JOB):
+    try:
+        cases.put_nowait(None)
+    except queue.Full:
+        pass
+
+print('[%d cases generated]' % nb_cases, file=sys.stderr)
+print('[output generation is over]', file=sys.stderr)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/perf-utils/subsetmaker.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,170 @@
+"""revset to select sample of repository
+
+Hopefully this is useful to create interesting discovery cases.
+"""
+
+import collections
+import random
+
+from mercurial.i18n import _
+
+from mercurial import (
+    registrar,
+    revset,
+    revsetlang,
+    smartset,
+)
+
+revsetpredicate = registrar.revsetpredicate()
+
+
+@revsetpredicate(b'subsetspec("<spec>")')
+def subsetmarkerspec(repo, subset, x):
+    """use a shorthand spec as used by search-discovery-case
+
+    Supported formats are:
+
+    - "scratch-count-seed": not scratch(all(), count, "seed")
+    - "randomantichain-seed": ::randomantichain(all(), "seed")
+    - "rev-REV": "::REV"
+    """
+    args = revsetlang.getargs(
+        x, 0, 1, _(b'subsetspec("spec") requires an argument')
+    )
+
+    spec = revsetlang.getstring(args[0], _(b"spec should be a string"))
+    case = spec.split(b'-')
+    t = case[0]
+    if t == b'scratch':
+        spec_revset = b'not scratch(all(), %s, "%s")' % (case[1], case[2])
+    elif t == b'randomantichain':
+        spec_revset = b'::randomantichain(all(), "%s")' % case[1]
+    elif t == b'rev':
+        spec_revset = b'::%s' % case[1]  # case[1] is bytes, so %s, not %d
+    else:
+        assert False, spec
+
+    selected = repo.revs(spec_revset)
+
+    return selected & subset
+
+
+@revsetpredicate(b'scratch(REVS, <count>, [seed])')
+def scratch(repo, subset, x):
+    """randomly remove <count> revision from the repository top
+
+    This subset is created by recursively picking changeset starting from the
+    heads. It can be summarized using the following algorithm::
+
+        selected = set()
+        for i in range(<count>):
+            unselected = repo.revs("not <selected>")
+            candidates = repo.revs("heads(<unselected>)")
+            pick = random.choice(candidates)
+            selected.add(pick)
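+
+    For example (an illustrative revset; the seed makes it reproducible)::
+
+        not scratch(all(), 100, "42")
+
+    selects everything except 100 revisions scratched from the top.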
+    """
+    m = _(b"scratch expects revisions, count argument and an optional seed")
+    args = revsetlang.getargs(x, 2, 3, m)
+    if len(args) == 2:
+        x, n = args
+        rand = random
+    elif len(args) == 3:
+        x, n, seed = args
+        seed = revsetlang.getinteger(seed, _(b"seed should be a number"))
+        rand = random.Random(seed)
+    else:
+        assert False
+
+    n = revsetlang.getinteger(n, _(b"scratch expects a number"))
+
+    selected = set()
+    heads = set()
+    children_count = collections.defaultdict(lambda: 0)
+    parents = repo.changelog._uncheckedparentrevs
+
+    baseset = revset.getset(repo, smartset.fullreposet(repo), x)
+    baseset.sort()
+    for r in baseset:
+        heads.add(r)
+
+        p1, p2 = parents(r)
+        if p1 >= 0:
+            heads.discard(p1)
+            children_count[p1] += 1
+        if p2 >= 0:
+            heads.discard(p2)
+            children_count[p2] += 1
+
+    for h in heads:
+        assert children_count[h] == 0
+
+    selected = set()
+    for x in range(n):
+        if not heads:
+            break
+        pick = rand.choice(list(heads))
+        heads.remove(pick)
+        assert pick not in selected
+        selected.add(pick)
+        p1, p2 = parents(pick)
+        if p1 in children_count:
+            assert p1 in children_count
+            children_count[p1] -= 1
+            assert children_count[p1] >= 0
+            if children_count[p1] == 0:
+                assert p1 not in selected, (r, p1)
+                heads.add(p1)
+        if p2 in children_count:
+            assert p2 in children_count
+            children_count[p2] -= 1
+            assert children_count[p2] >= 0
+            if children_count[p2] == 0:
+                assert p2 not in selected, (r, p2)
+                heads.add(p2)
+
+    return smartset.baseset(selected) & subset
+
+
+@revsetpredicate(b'randomantichain(REVS, [seed])')
+def antichain(repo, subset, x):
+    """Pick a random anti-chain in the repository
+
+    A antichain is a set of changeset where there isn't any element that is
+    either a descendant or ancestors of any other element in the set. In other
+    word, all the elements are independant. It can be summarized with the
+    following algorithm::
+
+        selected = set()
+        unselected = repo.revs('all()')
+        while unselected:
+            pick = random.choice(unselected)
+            selected.add(pick)
+            unselected -= repo.revs('::<pick> + <pick>::')
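+
+    An illustrative revset (the seed makes the result reproducible)::
+
+        randomantichain(all(), "42")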
+    """
+
+    args = revsetlang.getargs(
+        x, 1, 2, _(b"randomantichain expects revisions and an optional seed")
+    )
+    if len(args) == 1:
+        (x,) = args
+        rand = random
+    elif len(args) == 2:
+        x, seed = args
+        seed = revsetlang.getinteger(seed, _(b"seed should be a number"))
+        rand = random.Random(seed)
+    else:
+        assert False
+
+    selected = set()
+
+    baseset = revset.getset(repo, smartset.fullreposet(repo), x)
+    undecided = baseset
+
+    while undecided:
+        pick = rand.choice(list(undecided))
+        selected.add(pick)
+        undecided = repo.revs(
+            '%ld and not (::%ld or %ld::head())', baseset, selected, selected
+        )
+
+    return smartset.baseset(selected) & subset
--- a/contrib/perf.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/perf.py	Thu Mar 18 18:24:59 2021 -0400
@@ -744,7 +744,7 @@
 # perf commands
 
 
-@command(b'perfwalk', formatteropts)
+@command(b'perf::walk|perfwalk', formatteropts)
 def perfwalk(ui, repo, *pats, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -759,7 +759,7 @@
     fm.end()
 
 
-@command(b'perfannotate', formatteropts)
+@command(b'perf::annotate|perfannotate', formatteropts)
 def perfannotate(ui, repo, f, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -769,7 +769,7 @@
 
 
 @command(
-    b'perfstatus',
+    b'perf::status|perfstatus',
     [
         (b'u', b'unknown', False, b'ask status to look for unknown files'),
         (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
@@ -806,7 +806,7 @@
     fm.end()
 
 
-@command(b'perfaddremove', formatteropts)
+@command(b'perf::addremove|perfaddremove', formatteropts)
 def perfaddremove(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -837,7 +837,7 @@
         cl._nodepos = None
 
 
-@command(b'perfheads', formatteropts)
+@command(b'perf::heads|perfheads', formatteropts)
 def perfheads(ui, repo, **opts):
     """benchmark the computation of a changelog heads"""
     opts = _byteskwargs(opts)
@@ -855,7 +855,7 @@
 
 
 @command(
-    b'perftags',
+    b'perf::tags|perftags',
     formatteropts
     + [
         (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -880,7 +880,7 @@
     fm.end()
 
 
-@command(b'perfancestors', formatteropts)
+@command(b'perf::ancestors|perfancestors', formatteropts)
 def perfancestors(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -894,7 +894,7 @@
     fm.end()
 
 
-@command(b'perfancestorset', formatteropts)
+@command(b'perf::ancestorset|perfancestorset', formatteropts)
 def perfancestorset(ui, repo, revset, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -910,7 +910,7 @@
     fm.end()
 
 
-@command(b'perfdiscovery', formatteropts, b'PATH')
+@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
 def perfdiscovery(ui, repo, path, **opts):
     """benchmark discovery between local repo and the peer at given path"""
     repos = [repo, None]
@@ -928,7 +928,7 @@
 
 
 @command(
-    b'perfbookmarks',
+    b'perf::bookmarks|perfbookmarks',
     formatteropts
     + [
         (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
@@ -953,7 +953,7 @@
     fm.end()
 
 
-@command(b'perfbundleread', formatteropts, b'BUNDLE')
+@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
 def perfbundleread(ui, repo, bundlepath, **opts):
     """Benchmark reading of bundle files.
 
@@ -1080,7 +1080,7 @@
 
 
 @command(
-    b'perfchangegroupchangelog',
+    b'perf::changegroupchangelog|perfchangegroupchangelog',
     formatteropts
     + [
         (b'', b'cgversion', b'02', b'changegroup version'),
@@ -1116,7 +1116,7 @@
     fm.end()
 
 
-@command(b'perfdirs', formatteropts)
+@command(b'perf::dirs|perfdirs', formatteropts)
 def perfdirs(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1132,7 +1132,7 @@
 
 
 @command(
-    b'perfdirstate',
+    b'perf::dirstate|perfdirstate',
     [
         (
             b'',
@@ -1195,7 +1195,7 @@
     fm.end()
 
 
-@command(b'perfdirstatedirs', formatteropts)
+@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
 def perfdirstatedirs(ui, repo, **opts):
     """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
     opts = _byteskwargs(opts)
@@ -1212,7 +1212,7 @@
     fm.end()
 
 
-@command(b'perfdirstatefoldmap', formatteropts)
+@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
 def perfdirstatefoldmap(ui, repo, **opts):
     """benchmap a `dirstate._map.filefoldmap.get()` request
 
@@ -1233,7 +1233,7 @@
     fm.end()
 
 
-@command(b'perfdirfoldmap', formatteropts)
+@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
 def perfdirfoldmap(ui, repo, **opts):
     """benchmap a `dirstate._map.dirfoldmap.get()` request
 
@@ -1255,7 +1255,7 @@
     fm.end()
 
 
-@command(b'perfdirstatewrite', formatteropts)
+@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
     """benchmap the time it take to write a dirstate on disk"""
     opts = _byteskwargs(opts)
@@ -1297,7 +1297,7 @@
 
 
 @command(
-    b'perfmergecalculate',
+    b'perf::mergecalculate|perfmergecalculate',
     [
         (b'r', b'rev', b'.', b'rev to merge against'),
         (b'', b'from', b'', b'rev to merge from'),
@@ -1330,7 +1330,7 @@
 
 
 @command(
-    b'perfmergecopies',
+    b'perf::mergecopies|perfmergecopies',
     [
         (b'r', b'rev', b'.', b'rev to merge against'),
         (b'', b'from', b'', b'rev to merge from'),
@@ -1353,7 +1353,7 @@
     fm.end()
 
 
-@command(b'perfpathcopies', [], b"REV REV")
+@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
 def perfpathcopies(ui, repo, rev1, rev2, **opts):
     """benchmark the copy tracing logic"""
     opts = _byteskwargs(opts)
@@ -1369,7 +1369,7 @@
 
 
 @command(
-    b'perfphases',
+    b'perf::phases|perfphases',
     [
         (b'', b'full', False, b'include file reading time too'),
     ],
@@ -1394,7 +1394,7 @@
     fm.end()
 
 
-@command(b'perfphasesremote', [], b"[DEST]")
+@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
 def perfphasesremote(ui, repo, dest=None, **opts):
     """benchmark time needed to analyse phases of the remote server"""
     from mercurial.node import bin
@@ -1407,7 +1407,7 @@
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
-    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
+    path = ui.getpath(dest, default=(b'default-push', b'default'))
     if not path:
         raise error.Abort(
             b'default repository not configured!',
@@ -1455,7 +1455,7 @@
 
 
 @command(
-    b'perfmanifest',
+    b'perf::manifest|perfmanifest',
     [
         (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
         (b'', b'clear-disk', False, b'clear on-disk caches too'),
@@ -1499,7 +1499,7 @@
     fm.end()
 
 
-@command(b'perfchangeset', formatteropts)
+@command(b'perf::changeset|perfchangeset', formatteropts)
 def perfchangeset(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1513,7 +1513,7 @@
     fm.end()
 
 
-@command(b'perfignore', formatteropts)
+@command(b'perf::ignore|perfignore', formatteropts)
 def perfignore(ui, repo, **opts):
     """benchmark operation related to computing ignore"""
     opts = _byteskwargs(opts)
@@ -1532,7 +1532,7 @@
 
 
 @command(
-    b'perfindex',
+    b'perf::index|perfindex',
     [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'no-lookup', None, b'do not revision lookup post creation'),
@@ -1596,7 +1596,7 @@
 
 
 @command(
-    b'perfnodemap',
+    b'perf::nodemap|perfnodemap',
     [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'clear-caches', True, b'clear revlog cache between calls'),
@@ -1667,7 +1667,7 @@
     fm.end()
 
 
-@command(b'perfstartup', formatteropts)
+@command(b'perf::startup|perfstartup', formatteropts)
 def perfstartup(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1685,7 +1685,7 @@
     fm.end()
 
 
-@command(b'perfparents', formatteropts)
+@command(b'perf::parents|perfparents', formatteropts)
 def perfparents(ui, repo, **opts):
     """benchmark the time necessary to fetch one changeset's parents.
 
@@ -1712,7 +1712,7 @@
     fm.end()
 
 
-@command(b'perfctxfiles', formatteropts)
+@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
 def perfctxfiles(ui, repo, x, **opts):
     opts = _byteskwargs(opts)
     x = int(x)
@@ -1725,7 +1725,7 @@
     fm.end()
 
 
-@command(b'perfrawfiles', formatteropts)
+@command(b'perf::rawfiles|perfrawfiles', formatteropts)
 def perfrawfiles(ui, repo, x, **opts):
     opts = _byteskwargs(opts)
     x = int(x)
@@ -1739,7 +1739,7 @@
     fm.end()
 
 
-@command(b'perflookup', formatteropts)
+@command(b'perf::lookup|perflookup', formatteropts)
 def perflookup(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1748,7 +1748,7 @@
 
 
 @command(
-    b'perflinelogedits',
+    b'perf::linelogedits|perflinelogedits',
     [
         (b'n', b'edits', 10000, b'number of edits'),
         (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
@@ -1786,7 +1786,7 @@
     fm.end()
 
 
-@command(b'perfrevrange', formatteropts)
+@command(b'perf::revrange|perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1795,7 +1795,7 @@
     fm.end()
 
 
-@command(b'perfnodelookup', formatteropts)
+@command(b'perf::nodelookup|perfnodelookup', formatteropts)
 def perfnodelookup(ui, repo, rev, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -1814,7 +1814,7 @@
 
 
 @command(
-    b'perflog',
+    b'perf::log|perflog',
     [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
 )
 def perflog(ui, repo, rev=None, **opts):
@@ -1832,7 +1832,7 @@
     fm.end()
 
 
-@command(b'perfmoonwalk', formatteropts)
+@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
 def perfmoonwalk(ui, repo, **opts):
     """benchmark walking the changelog backwards
 
@@ -1851,7 +1851,7 @@
 
 
 @command(
-    b'perftemplating',
+    b'perf::templating|perftemplating',
     [
         (b'r', b'rev', [], b'revisions to run the template on'),
     ]
@@ -1941,7 +1941,7 @@
 
 
 @command(
-    b'perfhelper-mergecopies',
+    b'perf::helper-mergecopies|perfhelper-mergecopies',
     formatteropts
     + [
         (b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2124,7 +2124,7 @@
 
 
 @command(
-    b'perfhelper-pathcopies',
+    b'perf::helper-pathcopies|perfhelper-pathcopies',
     formatteropts
     + [
         (b'r', b'revs', [], b'restrict search to these revisions'),
@@ -2263,7 +2263,7 @@
         _displaystats(ui, opts, entries, alldata)
 
 
-@command(b'perfcca', formatteropts)
+@command(b'perf::cca|perfcca', formatteropts)
 def perfcca(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -2271,7 +2271,7 @@
     fm.end()
 
 
-@command(b'perffncacheload', formatteropts)
+@command(b'perf::fncacheload|perffncacheload', formatteropts)
 def perffncacheload(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -2284,7 +2284,7 @@
     fm.end()
 
 
-@command(b'perffncachewrite', formatteropts)
+@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
 def perffncachewrite(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -2304,7 +2304,7 @@
     fm.end()
 
 
-@command(b'perffncacheencode', formatteropts)
+@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
 def perffncacheencode(ui, repo, **opts):
     opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
@@ -2348,7 +2348,7 @@
 
 
 @command(
-    b'perfbdiff',
+    b'perf::bdiff|perfbdiff',
     revlogopts
     + formatteropts
     + [
@@ -2464,7 +2464,7 @@
 
 
 @command(
-    b'perfunidiff',
+    b'perf::unidiff|perfunidiff',
     revlogopts
     + formatteropts
     + [
@@ -2543,7 +2543,7 @@
     fm.end()
 
 
-@command(b'perfdiffwd', formatteropts)
+@command(b'perf::diffwd|perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
     opts = _byteskwargs(opts)
@@ -2568,7 +2568,11 @@
     fm.end()
 
 
-@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
+@command(
+    b'perf::revlogindex|perfrevlogindex',
+    revlogopts + formatteropts,
+    b'-c|-m|FILE',
+)
 def perfrevlogindex(ui, repo, file_=None, **opts):
     """Benchmark operations against a revlog index.
 
@@ -2704,7 +2708,7 @@
 
 
 @command(
-    b'perfrevlogrevisions',
+    b'perf::revlogrevisions|perfrevlogrevisions',
     revlogopts
     + formatteropts
     + [
@@ -2754,7 +2758,7 @@
 
 
 @command(
-    b'perfrevlogwrite',
+    b'perf::revlogwrite|perfrevlogwrite',
     revlogopts
     + formatteropts
     + [
@@ -3047,7 +3051,7 @@
 
 
 @command(
-    b'perfrevlogchunks',
+    b'perf::revlogchunks|perfrevlogchunks',
     revlogopts
     + formatteropts
     + [
@@ -3176,7 +3180,7 @@
 
 
 @command(
-    b'perfrevlogrevision',
+    b'perf::revlogrevision|perfrevlogrevision',
     revlogopts
     + formatteropts
     + [(b'', b'cache', False, b'use caches instead of clearing')],
@@ -3319,7 +3323,7 @@
 
 
 @command(
-    b'perfrevset',
+    b'perf::revset|perfrevset',
     [
         (b'C', b'clear', False, b'clear volatile cache between each call.'),
         (b'', b'contexts', False, b'obtain changectx for each revision'),
@@ -3352,7 +3356,7 @@
 
 
 @command(
-    b'perfvolatilesets',
+    b'perf::volatilesets|perfvolatilesets',
     [
         (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
     ]
@@ -3401,7 +3405,7 @@
 
 
 @command(
-    b'perfbranchmap',
+    b'perf::branchmap|perfbranchmap',
     [
         (b'f', b'full', False, b'Includes build time of subset'),
         (
@@ -3492,7 +3496,7 @@
 
 
 @command(
-    b'perfbranchmapupdate',
+    b'perf::branchmapupdate|perfbranchmapupdate',
     [
         (b'', b'base', [], b'subset of revision to start from'),
         (b'', b'target', [], b'subset of revision to end with'),
@@ -3602,7 +3606,7 @@
 
 
 @command(
-    b'perfbranchmapload',
+    b'perf::branchmapload|perfbranchmapload',
     [
         (b'f', b'filter', b'', b'Specify repoview filter'),
         (b'', b'list', False, b'List brachmap filter caches'),
@@ -3661,19 +3665,19 @@
     fm.end()
 
 
-@command(b'perfloadmarkers')
+@command(b'perf::loadmarkers|perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
 
     Result is the number of markers in the repo."""
     timer, fm = gettimer(ui)
     svfs = getsvfs(repo)
-    timer(lambda: len(obsolete.obsstore(svfs)))
+    timer(lambda: len(obsolete.obsstore(repo, svfs)))
     fm.end()
 
 
 @command(
-    b'perflrucachedict',
+    b'perf::lrucachedict|perflrucachedict',
     formatteropts
     + [
         (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
@@ -3829,7 +3833,7 @@
 
 
 @command(
-    b'perfwrite',
+    b'perf::write|perfwrite',
     formatteropts
     + [
         (b'', b'write-method', b'write', b'ui write method'),
@@ -3892,7 +3896,7 @@
 
 
 @command(
-    b'perfprogress',
+    b'perf::progress|perfprogress',
     formatteropts
     + [
         (b'', b'topic', b'topic', b'topic for progress messages'),
--- a/contrib/python-zstandard/c-ext/bufferutil.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/bufferutil.c	Thu Mar 18 18:24:59 2021 -0400
@@ -758,7 +758,7 @@
 };
 
 void bufferutil_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdBufferWithSegmentsType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdBufferWithSegmentsType, &PyType_Type);
 	if (PyType_Ready(&ZstdBufferWithSegmentsType) < 0) {
 		return;
 	}
@@ -766,7 +766,7 @@
 	Py_INCREF(&ZstdBufferWithSegmentsType);
 	PyModule_AddObject(mod, "BufferWithSegments", (PyObject*)&ZstdBufferWithSegmentsType);
 
-	Py_TYPE(&ZstdBufferSegmentsType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdBufferSegmentsType, &PyType_Type);
 	if (PyType_Ready(&ZstdBufferSegmentsType) < 0) {
 		return;
 	}
@@ -774,7 +774,7 @@
 	Py_INCREF(&ZstdBufferSegmentsType);
 	PyModule_AddObject(mod, "BufferSegments", (PyObject*)&ZstdBufferSegmentsType);
 
-	Py_TYPE(&ZstdBufferSegmentType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdBufferSegmentType, &PyType_Type);
 	if (PyType_Ready(&ZstdBufferSegmentType) < 0) {
 		return;
 	}
@@ -782,7 +782,7 @@
 	Py_INCREF(&ZstdBufferSegmentType);
 	PyModule_AddObject(mod, "BufferSegment", (PyObject*)&ZstdBufferSegmentType);
 
-	Py_TYPE(&ZstdBufferWithSegmentsCollectionType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdBufferWithSegmentsCollectionType, &PyType_Type);
 	if (PyType_Ready(&ZstdBufferWithSegmentsCollectionType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressionchunker.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionchunker.c	Thu Mar 18 18:24:59 2021 -0400
@@ -348,12 +348,12 @@
 };
 
 void compressionchunker_module_init(PyObject* module) {
-	Py_TYPE(&ZstdCompressionChunkerIteratorType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionChunkerIteratorType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionChunkerIteratorType) < 0) {
 		return;
 	}
 
-	Py_TYPE(&ZstdCompressionChunkerType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionChunkerType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionChunkerType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressiondict.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressiondict.c	Thu Mar 18 18:24:59 2021 -0400
@@ -400,7 +400,7 @@
 };
 
 void compressiondict_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdCompressionDictType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionDictType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionDictType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Thu Mar 18 18:24:59 2021 -0400
@@ -556,7 +556,7 @@
 };
 
 void compressionparams_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdCompressionParametersType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionParametersType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionParametersType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressionreader.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionreader.c	Thu Mar 18 18:24:59 2021 -0400
@@ -811,7 +811,7 @@
 void compressionreader_module_init(PyObject* mod) {
 	/* TODO make reader a sub-class of io.RawIOBase */
 
-	Py_TYPE(&ZstdCompressionReaderType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionReaderType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionReaderType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressionwriter.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Thu Mar 18 18:24:59 2021 -0400
@@ -365,7 +365,7 @@
 };
 
 void compressionwriter_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdCompressionWriterType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionWriterType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionWriterType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressobj.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Thu Mar 18 18:24:59 2021 -0400
@@ -249,7 +249,7 @@
 };
 
 void compressobj_module_init(PyObject* module) {
-	Py_TYPE(&ZstdCompressionObjType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressionObjType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressionObjType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressor.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressor.c	Thu Mar 18 18:24:59 2021 -0400
@@ -619,7 +619,7 @@
 		goto finally;
 	}
 
-	Py_SIZE(output) = outBuffer.pos;
+	Py_SET_SIZE(output, outBuffer.pos);
 
 finally:
 	PyBuffer_Release(&source);
@@ -1659,7 +1659,7 @@
 };
 
 void compressor_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdCompressorType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressorType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressorType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/compressoriterator.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/compressoriterator.c	Thu Mar 18 18:24:59 2021 -0400
@@ -228,7 +228,7 @@
 };
 
 void compressoriterator_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdCompressorIteratorType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdCompressorIteratorType, &PyType_Type);
 	if (PyType_Ready(&ZstdCompressorIteratorType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/decompressionreader.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressionreader.c	Thu Mar 18 18:24:59 2021 -0400
@@ -774,7 +774,7 @@
 void decompressionreader_module_init(PyObject* mod) {
 	/* TODO make reader a sub-class of io.RawIOBase */
 
-	Py_TYPE(&ZstdDecompressionReaderType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdDecompressionReaderType, &PyType_Type);
 	if (PyType_Ready(&ZstdDecompressionReaderType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c	Thu Mar 18 18:24:59 2021 -0400
@@ -288,7 +288,7 @@
 };
 
 void decompressionwriter_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdDecompressionWriterType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdDecompressionWriterType, &PyType_Type);
 	if (PyType_Ready(&ZstdDecompressionWriterType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/decompressobj.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Thu Mar 18 18:24:59 2021 -0400
@@ -195,7 +195,7 @@
 };
 
 void decompressobj_module_init(PyObject* module) {
-	Py_TYPE(&ZstdDecompressionObjType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdDecompressionObjType, &PyType_Type);
 	if (PyType_Ready(&ZstdDecompressionObjType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/decompressor.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Thu Mar 18 18:24:59 2021 -0400
@@ -1811,7 +1811,7 @@
 };
 
 void decompressor_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdDecompressorType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdDecompressorType, &PyType_Type);
 	if (PyType_Ready(&ZstdDecompressorType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/decompressoriterator.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/decompressoriterator.c	Thu Mar 18 18:24:59 2021 -0400
@@ -242,7 +242,7 @@
 };
 
 void decompressoriterator_module_init(PyObject* mod) {
-	Py_TYPE(&ZstdDecompressorIteratorType) = &PyType_Type;
+	Py_SET_TYPE(&ZstdDecompressorIteratorType, &PyType_Type);
 	if (PyType_Ready(&ZstdDecompressorIteratorType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/frameparams.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/frameparams.c	Thu Mar 18 18:24:59 2021 -0400
@@ -128,7 +128,7 @@
 };
 
 void frameparams_module_init(PyObject* mod) {
-	Py_TYPE(&FrameParametersType) = &PyType_Type;
+	Py_SET_TYPE(&FrameParametersType, &PyType_Type);
 	if (PyType_Ready(&FrameParametersType) < 0) {
 		return;
 	}
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Sat Mar 13 02:09:23 2021 -0500
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Thu Mar 18 18:24:59 2021 -0400
@@ -9,6 +9,7 @@
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
 #include "structmember.h"
+#include <pythoncapi_compat.h>
 
 #define ZSTD_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pythoncapi_compat.h	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h"          // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+#  define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+    Py_INCREF(obj);
+    return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+    Py_XINCREF(obj);
+    return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+    ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+    ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+    ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+    PyCodeObject *code;
+    assert(frame != NULL);
+    code = frame->f_code;
+    assert(code != NULL);
+    Py_INCREF(code);
+    return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+    PyCodeObject *code = PyFrame_GetCode(frame);
+    Py_DECREF(code);
+    return code;  // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+    PyFrameObject *back;
+    assert(frame != NULL);
+    back = frame->f_back;
+    Py_XINCREF(back);
+    return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+    PyFrameObject *back = PyFrame_GetBack(frame);
+    Py_XDECREF(back);
+    return back;  // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+    assert(tstate != NULL);
+    return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+    PyFrameObject *frame;
+    assert(tstate != NULL);
+    frame = tstate->frame;
+    Py_XINCREF(frame);
+    return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+    PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+    Py_XDECREF(frame);
+    return frame;  // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+    PyThreadState *tstate;
+    PyInterpreterState *interp;
+
+    tstate = PyThreadState_GET();
+    if (tstate == NULL) {
+        Py_FatalError("GIL released (tstate is NULL)");
+    }
+    interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("no current interpreter");
+    }
+    return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+    assert(tstate != NULL);
+    return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+    return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+    return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+    const char *name, *dot;
+
+    if (PyType_Ready(type) < 0) {
+        return -1;
+    }
+
+    // inline _PyType_Name()
+    name = type->tp_name;
+    assert(name != NULL);
+    dot = strrchr(name, '.');
+    if (dot != NULL) {
+        name = dot + 1;
+    }
+
+    Py_INCREF(type);
+    if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+        Py_DECREF(type);
+        return -1;
+    }
+
+    return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+    return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+    return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+    return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // PYTHONCAPI_COMPAT
--- a/doc/gendoc.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/doc/gendoc.py	Thu Mar 18 18:24:59 2021 -0400
@@ -31,6 +31,7 @@
     commands,
     encoding,
     extensions,
+    fancyopts,
     help,
     minirst,
     pycompat,
@@ -86,6 +87,8 @@
         if b'\n' in desc:
             # only remove line breaks and indentation
             desc = b' '.join(l.lstrip() for l in desc.split(b'\n'))
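+        # a customopt wraps its default value; unwrap it so the generated
+        # help shows the underlying value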
+        if isinstance(default, fancyopts.customopt):
+            default = default.getdefaultvalue()
         if default:
             default = stringutil.forcebytestr(default)
             desc += _(b" (default: %s)") % default
@@ -314,7 +317,12 @@
                 ui.write(b"\n")
             # aliases
             if d[b'aliases']:
-                ui.write(_(b"    aliases: %s\n\n") % b" ".join(d[b'aliases']))
+                # Note the empty comment; it is required to separate this
+                # (which should be a blockquote) from any preceding content
+                # (such as a definition list).
+                ui.write(
+                    _(b"..\n\n    aliases: %s\n\n") % b" ".join(d[b'aliases'])
+                )
 
 
 def allextensionnames():
@@ -327,6 +335,11 @@
         doc = encoding.strtolocal(sys.argv[1])
 
     ui = uimod.ui.load()
+    # Trigger extensions to load. This is disabled by default because it uses
+    # the current user's configuration, which is often not what is wanted.
+    if encoding.environ.get(b'GENDOC_LOAD_CONFIGURED_EXTENSIONS', b'0') != b'0':
+        extensions.loadall(ui)
+
     if doc == b'hg.1.gendoc':
         showdoc(ui)
     else:
--- a/hgext/absorb.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/absorb.py	Thu Mar 18 18:24:59 2021 -0400
@@ -102,6 +102,9 @@
 class emptyfilecontext(object):
     """minimal filecontext representing an empty file"""
 
+    def __init__(self, repo):
+        self._repo = repo
+
     def data(self):
         return b''
 
@@ -212,7 +215,7 @@
         if path in pctx:
             fctxs.append(pctx[path])
         else:
-            fctxs.append(emptyfilecontext())
+            fctxs.append(emptyfilecontext(pctx.repo()))
 
     fctxs.reverse()
     # note: we rely on a property of hg: filerev is not reused for linear
--- a/hgext/blackbox.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/blackbox.py	Thu Mar 18 18:24:59 2021 -0400
@@ -38,7 +38,7 @@
   [blackbox]
   # Include nanoseconds in log entries with %f (see Python function
   # datetime.datetime.strftime)
-  date-format = '%Y-%m-%d @ %H:%M:%S.%f'
+  date-format = %Y-%m-%d @ %H:%M:%S.%f
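+  # (the value must not be quoted; quotes would appear literally in the log)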
 
 """
 
--- a/hgext/churn.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/churn.py	Thu Mar 18 18:24:59 2021 -0400
@@ -38,11 +38,16 @@
 def changedlines(ui, repo, ctx1, ctx2, fmatch):
     added, removed = 0, 0
     diff = b''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
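+    # Count only lines inside hunks, so that file header lines ("--- ",
+    # "+++ ", "diff ...") are never mistaken for changed lines.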
+    inhunk = False
     for l in diff.split(b'\n'):
-        if l.startswith(b"+") and not l.startswith(b"+++ "):
+        if inhunk and l.startswith(b"+"):
             added += 1
-        elif l.startswith(b"-") and not l.startswith(b"--- "):
+        elif inhunk and l.startswith(b"-"):
             removed += 1
+        elif l.startswith(b"@"):
+            inhunk = True
+        elif l.startswith(b"d"):
+            inhunk = False
     return (added, removed)
 
 
--- a/hgext/convert/__init__.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/convert/__init__.py	Thu Mar 18 18:24:59 2021 -0400
@@ -491,6 +491,22 @@
 
     :convert.skiptags: does not convert tags from the source repo to the target
         repo. The default is False.
+
+    Subversion Destination
+    ######################
+
+    Original commit dates are not preserved by default.
+
+    :convert.svn.dangerous-set-commit-dates: preserve original commit dates,
+        forcefully setting ``svn:date`` revision properties. This option is
+        DANGEROUS and may break some subversion functionality for the resulting
+        repository (e.g. filtering revisions with date ranges in ``svn log``),
+        as original commit dates are not guaranteed to be monotonically
+        increasing.
+
+    For commit date setting to work, the destination repository must have the
+    ``pre-revprop-change`` hook configured to allow setting of ``svn:date``
+    revision properties. See the Subversion documentation for more details.
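+
+    A hypothetical invocation preserving commit dates (paths are examples)::
+
+        hg convert --config convert.svn.dangerous-set-commit-dates=yes \
+            project-hg project-svn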
     """
     return convcmd.convert(ui, src, dest, revmapfile, **opts)
 
--- a/hgext/convert/git.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/convert/git.py	Thu Mar 18 18:24:59 2021 -0400
@@ -247,7 +247,8 @@
             b'\n'.join(line.strip() for line in content.split(b'\n')),
         )
         for sec in c.sections():
-            s = c[sec]
+            # turn the config object into a real dict
+            s = dict(c.items(sec))
             if b'url' in s and b'path' in s:
                 self.submodules.append(submodule(s[b'path'], b'', s[b'url']))
 
--- a/hgext/convert/subversion.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/convert/subversion.py	Thu Mar 18 18:24:59 2021 -0400
@@ -97,6 +97,17 @@
         return s.decode(fsencoding).encode('utf-8')
 
 
+def formatsvndate(date):
+    return dateutil.datestr(date, b'%Y-%m-%dT%H:%M:%S.000000Z')
+
+
+def parsesvndate(s):
+    # Example SVN datetime. Includes microseconds.
+    # ISO-8601 conformant
+    # '2007-01-04T17:35:00.902377Z'
+    return dateutil.parsedate(s[:19] + b' UTC', [b'%Y-%m-%dT%H:%M:%S'])
+
+
 class SvnPathNotFound(Exception):
     pass
 
@@ -1158,12 +1169,7 @@
                     continue
                 paths.append((path, ent))
 
-            # Example SVN datetime. Includes microseconds.
-            # ISO-8601 conformant
-            # '2007-01-04T17:35:00.902377Z'
-            date = dateutil.parsedate(
-                date[:19] + b" UTC", [b"%Y-%m-%dT%H:%M:%S"]
-            )
+            date = parsesvndate(date)
             if self.ui.configbool(b'convert', b'localtimezone'):
                 date = makedatetimestamp(date[0])
 
@@ -1380,7 +1386,7 @@
         return logstream(stdout)
 
 
-pre_revprop_change = b'''#!/bin/sh
+pre_revprop_change_template = b'''#!/bin/sh
 
 REPOS="$1"
 REV="$2"
@@ -1388,15 +1394,26 @@
 PROPNAME="$4"
 ACTION="$5"
 
-if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
+%(rules)s
 
 echo "Changing prohibited revision property" >&2
 exit 1
 '''
 
 
+def gen_pre_revprop_change_hook(prop_actions_allowed):
+    rules = []
+    for action, propname in prop_actions_allowed:
+        rules.append(
+            (
+                b'if [ "$ACTION" = "%s" -a "$PROPNAME" = "%s" ]; '
+                b'then exit 0; fi'
+            )
+            % (action, propname)
+        )
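+    # e.g. the entry (b'M', b'svn:log') expands to:
+    #   if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi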
+    return pre_revprop_change_template % {b'rules': b'\n'.join(rules)}
+
+
 class svn_sink(converter_sink, commandline):
     commit_re = re.compile(br'Committed revision (\d+).', re.M)
     uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M)
@@ -1470,9 +1487,20 @@
             self.is_exec = None
 
         if created:
+            prop_actions_allowed = [
+                (b'M', b'svn:log'),
+                (b'A', b'hg:convert-branch'),
+                (b'A', b'hg:convert-rev'),
+            ]
+
+            if self.ui.configbool(
+                b'convert', b'svn.dangerous-set-commit-dates'
+            ):
+                prop_actions_allowed.append((b'M', b'svn:date'))
+
             hook = os.path.join(created, b'hooks', b'pre-revprop-change')
             fp = open(hook, b'wb')
-            fp.write(pre_revprop_change)
+            fp.write(gen_pre_revprop_change_hook(prop_actions_allowed))
             fp.close()
             util.setflags(hook, False, True)
 
@@ -1667,6 +1695,23 @@
                     revprop=True,
                     revision=rev,
                 )
+
+            if self.ui.configbool(
+                b'convert', b'svn.dangerous-set-commit-dates'
+            ):
+                # Subversion always uses UTC to represent date and time
+                date = dateutil.parsedate(commit.date)
+                date = (date[0], 0)
+
+                # The only way to set the date and time of an svn commit is to use propset after the commit is done
+                self.run(
+                    b'propset',
+                    b'svn:date',
+                    formatsvndate(date),
+                    revprop=True,
+                    revision=rev,
+                )
+
             for parent in parents:
                 self.addchild(parent, rev)
             return self.revid(rev)
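
A self-contained sketch of what formatsvndate()/parsesvndate() above do,
using only the standard library instead of mercurial.utils.dateutil; note
that parsing truncates to whole seconds, exactly like the s[:19] slice in
the patch:

    from datetime import datetime, timezone

    def parse_svn_date(s):
        # keep the seconds-resolution prefix, dropping '.902377Z'
        dt = datetime.strptime(s[:19], '%Y-%m-%dT%H:%M:%S')
        return dt.replace(tzinfo=timezone.utc)

    def format_svn_date(dt):
        # svn:date is always UTC; microseconds are zeroed, as above
        return dt.strftime('%Y-%m-%dT%H:%M:%S.000000Z')

    dt = parse_svn_date('2007-01-04T17:35:00.902377Z')
    assert format_svn_date(dt) == '2007-01-04T17:35:00.000000Z'
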
--- a/hgext/fix.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/fix.py	Thu Mar 18 18:24:59 2021 -0400
@@ -433,8 +433,9 @@
     if not (len(revs) == 1 and wdirrev in revs):
         cmdutil.checkunfinished(repo)
         rewriteutil.precheck(repo, revs, b'fix')
-    if wdirrev in revs and list(
-        mergestatemod.mergestate.read(repo).unresolved()
+    if (
+        wdirrev in revs
+        and mergestatemod.mergestate.read(repo).unresolvedcount()
     ):
         raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
     if not revs:
--- a/hgext/git/__init__.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/git/__init__.py	Thu Mar 18 18:24:59 2021 -0400
@@ -90,7 +90,7 @@
             return os.path.join(self.path, b'..', b'.hg', f)
         raise NotImplementedError(b'Need to pick file for %s.' % f)
 
-    def changelog(self, trypending):
+    def changelog(self, trypending, concurrencychecker):
         # TODO we don't have a plan for trypending in hg's git support yet
         return gitlog.changelog(self.git, self._db)
 
--- a/hgext/git/gitlog.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/git/gitlog.py	Thu Mar 18 18:24:59 2021 -0400
@@ -8,6 +8,7 @@
     nullhex,
     nullid,
     nullrev,
+    sha1nodeconstants,
     wdirhex,
 )
 from mercurial import (
@@ -422,6 +423,8 @@
 
 
 class manifestlog(baselog):
+    nodeconstants = sha1nodeconstants
+
     def __getitem__(self, node):
         return self.get(b'', node)
 
--- a/hgext/histedit.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/histedit.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1581,10 +1581,19 @@
     def layout(mode):
         maxy, maxx = stdscr.getmaxyx()
         helplen = len(helplines(mode))
+        mainlen = maxy - helplen - 12
+        if mainlen < 1:
+            raise error.Abort(
+                _(b"terminal dimensions %d by %d too small for curses histedit")
+                % (maxy, maxx),
+                hint=_(
+                    b"enlarge your terminal or use --config ui.interface=text"
+                ),
+            )
         return {
             b'commit': (12, maxx),
             b'help': (helplen, maxx),
-            b'main': (maxy - helplen - 12, maxx),
+            b'main': (mainlen, maxx),
         }
 
     def drawvertwin(size, y, x):
@@ -1614,63 +1623,60 @@
     stdscr.clear()
     stdscr.refresh()
     while True:
-        try:
-            oldmode, _ = state[b'mode']
-            if oldmode == MODE_INIT:
-                changemode(state, MODE_RULES)
-            e = event(state, ch)
-
-            if e == E_QUIT:
-                return False
-            if e == E_HISTEDIT:
-                return state[b'rules']
+        oldmode, unused = state[b'mode']
+        if oldmode == MODE_INIT:
+            changemode(state, MODE_RULES)
+        e = event(state, ch)
+
+        if e == E_QUIT:
+            return False
+        if e == E_HISTEDIT:
+            return state[b'rules']
+        else:
+            if e == E_RESIZE:
+                size = screen_size()
+                if size != stdscr.getmaxyx():
+                    curses.resizeterm(*size)
+
+            curmode, unused = state[b'mode']
+            sizes = layout(curmode)
+            if curmode != oldmode:
+                state[b'page_height'] = sizes[b'main'][0]
+                # Adjust the view to fit the current screen size.
+                movecursor(state, state[b'pos'], state[b'pos'])
+
+            # Pack the windows against the top, each pane spread across the
+            # full width of the screen.
+            y, x = (0, 0)
+            helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
+            mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
+            commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
+
+            if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
+                if e == E_PAGEDOWN:
+                    changeview(state, +1, b'page')
+                elif e == E_PAGEUP:
+                    changeview(state, -1, b'page')
+                elif e == E_LINEDOWN:
+                    changeview(state, +1, b'line')
+                elif e == E_LINEUP:
+                    changeview(state, -1, b'line')
+
+            # start rendering
+            commitwin.erase()
+            helpwin.erase()
+            mainwin.erase()
+            if curmode == MODE_PATCH:
+                renderpatch(mainwin, state)
+            elif curmode == MODE_HELP:
+                renderstring(mainwin, state, __doc__.strip().splitlines())
             else:
-                if e == E_RESIZE:
-                    size = screen_size()
-                    if size != stdscr.getmaxyx():
-                        curses.resizeterm(*size)
-
-                curmode, _ = state[b'mode']
-                sizes = layout(curmode)
-                if curmode != oldmode:
-                    state[b'page_height'] = sizes[b'main'][0]
-                    # Adjust the view to fit the current screen size.
-                    movecursor(state, state[b'pos'], state[b'pos'])
-
-                # Pack the windows against the top, each pane spread across the
-                # full width of the screen.
-                y, x = (0, 0)
-                helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
-                mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
-                commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
-
-                if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
-                    if e == E_PAGEDOWN:
-                        changeview(state, +1, b'page')
-                    elif e == E_PAGEUP:
-                        changeview(state, -1, b'page')
-                    elif e == E_LINEDOWN:
-                        changeview(state, +1, b'line')
-                    elif e == E_LINEUP:
-                        changeview(state, -1, b'line')
-
-                # start rendering
-                commitwin.erase()
-                helpwin.erase()
-                mainwin.erase()
-                if curmode == MODE_PATCH:
-                    renderpatch(mainwin, state)
-                elif curmode == MODE_HELP:
-                    renderstring(mainwin, state, __doc__.strip().splitlines())
-                else:
-                    renderrules(mainwin, state)
-                    rendercommit(commitwin, state)
-                renderhelp(helpwin, state)
-                curses.doupdate()
-                # done rendering
-                ch = encoding.strtolocal(stdscr.getkey())
-        except curses.error:
-            pass
+                renderrules(mainwin, state)
+                rendercommit(commitwin, state)
+            renderhelp(helpwin, state)
+            curses.doupdate()
+            # done rendering
+            ch = encoding.strtolocal(stdscr.getkey())
 
 
 def _chistedit(ui, repo, freeargs, opts):
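
To make the new guard concrete: the commit pane is fixed at 12 rows, the
help pane takes helplen rows, and the main pane gets whatever remains. A
quick sketch of the arithmetic with illustrative numbers:

    maxy, helplen = 24, 5          # terminal height, help pane height
    mainlen = maxy - helplen - 12  # 7 rows remain for the rule list
    assert mainlen >= 1  # below 1 row the panes cannot be drawn, hence the Abort
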
--- a/hgext/infinitepush/__init__.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/infinitepush/__init__.py	Thu Mar 18 18:24:59 2021 -0400
@@ -704,16 +704,19 @@
 
         if scratchbookmarks:
             other = hg.peer(repo, opts, source)
-            fetchedbookmarks = other.listkeyspatterns(
-                b'bookmarks', patterns=scratchbookmarks
-            )
-            for bookmark in scratchbookmarks:
-                if bookmark not in fetchedbookmarks:
-                    raise error.Abort(
-                        b'remote bookmark %s not found!' % bookmark
-                    )
-                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
-                revs.append(fetchedbookmarks[bookmark])
+            try:
+                fetchedbookmarks = other.listkeyspatterns(
+                    b'bookmarks', patterns=scratchbookmarks
+                )
+                for bookmark in scratchbookmarks:
+                    if bookmark not in fetchedbookmarks:
+                        raise error.Abort(
+                            b'remote bookmark %s not found!' % bookmark
+                        )
+                    scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
+                    revs.append(fetchedbookmarks[bookmark])
+            finally:
+                other.close()
         opts[b'bookmark'] = bookmarks
         opts[b'rev'] = revs
 
@@ -834,7 +837,7 @@
                 exchange, b'_localphasemove', _phasemove
             )
         # Copy-paste from `push` command
-        path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
+        path = ui.getpath(dest, default=(b'default-push', b'default'))
         if not path:
             raise error.Abort(
                 _(b'default repository not configured!'),
@@ -848,10 +851,13 @@
         if common.isremotebooksenabled(ui):
             if bookmark and scratchpush:
                 other = hg.peer(repo, opts, destpath)
-                fetchedbookmarks = other.listkeyspatterns(
-                    b'bookmarks', patterns=[bookmark]
-                )
-                remotescratchbookmarks.update(fetchedbookmarks)
+                try:
+                    fetchedbookmarks = other.listkeyspatterns(
+                        b'bookmarks', patterns=[bookmark]
+                    )
+                    remotescratchbookmarks.update(fetchedbookmarks)
+                finally:
+                    other.close()
             _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
     if oldphasemove:
         exchange._localphasemove = oldphasemove
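
Both hunks above apply the same resource pattern: the freshly created peer
must be closed even if the bookmark lookup raises. A runnable sketch of the
pattern with a stand-in peer class (DummyPeer is hypothetical):

    import contextlib

    class DummyPeer(object):
        """stand-in for an hg peer; only close() matters here"""
        def listkeys(self, namespace):
            raise RuntimeError('network error')
        def close(self):
            print('peer closed')

    # the peer is closed even though listkeys() raises, which is what
    # the try/finally blocks above guarantee
    try:
        with contextlib.closing(DummyPeer()) as peer:
            peer.listkeys('bookmarks')
    except RuntimeError:
        pass
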
--- a/hgext/largefiles/lfutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/largefiles/lfutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -206,6 +206,7 @@
         repo.root,
         repo.dirstate._validate,
         lambda: sparse.matcher(repo),
+        repo.nodeconstants,
     )
 
     # If the largefiles dirstate does not exist, populate and create
--- a/hgext/largefiles/overrides.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/largefiles/overrides.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1567,7 +1567,7 @@
 
 # Calling purge with --all will cause the largefiles to be deleted.
 # Override repo.status to prevent this from happening.
-@eh.wrapcommand(b'purge', extension=b'purge')
+@eh.wrapcommand(b'purge')
 def overridepurge(orig, ui, repo, *dirs, **opts):
     # XXX Monkey patching a repoview will not work. The assigned attribute will
     # be set on the unfiltered repo, but we will only lookup attributes in the
--- a/hgext/lfs/wrapper.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/lfs/wrapper.py	Thu Mar 18 18:24:59 2021 -0400
@@ -116,10 +116,10 @@
     if hgmeta or text.startswith(b'\1\n'):
         text = storageutil.packmeta(hgmeta, text)
 
-    return (text, True, {})
+    return (text, True)
 
 
-def writetostore(self, text, sidedata):
+def writetostore(self, text):
     # hg filelog metadata (includes rename, etc)
     hgmeta, offset = storageutil.parsemeta(text)
     if offset and offset > 0:
--- a/hgext/narrow/narrowcommands.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/narrow/narrowcommands.py	Thu Mar 18 18:24:59 2021 -0400
@@ -214,6 +214,7 @@
     newincludes,
     newexcludes,
     force,
+    backup,
 ):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
@@ -272,7 +273,7 @@
                 hg.clean(repo, urev)
             overrides = {(b'devel', b'strip-obsmarkers'): False}
             with ui.configoverride(overrides, b'narrow'):
-                repair.strip(ui, unfi, tostrip, topic=b'narrow')
+                repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
         todelete = []
         for f, f2, size in repo.store.datafiles():
@@ -442,6 +443,12 @@
         ),
         (
             b'',
+            b'backup',
+            True,
+            _(b'back up local changes when narrowing'),
+        ),
+        (
+            b'',
             b'update-working-copy',
             False,
             _(b'update working copy when the store has changed'),
@@ -588,76 +595,83 @@
         ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
         remote = hg.peer(repo, opts, url)
 
-        # check narrow support before doing anything if widening needs to be
-        # performed. In future we should also abort if client is ellipses and
-        # server does not support ellipses
-        if widening and wireprototypes.NARROWCAP not in remote.capabilities():
-            raise error.Abort(_(b"server does not support narrow clones"))
+        try:
+            # check narrow support before doing anything if widening needs to be
+            # performed. In future we should also abort if client is ellipses and
+            # server does not support ellipses
+            if (
+                widening
+                and wireprototypes.NARROWCAP not in remote.capabilities()
+            ):
+                raise error.Abort(_(b"server does not support narrow clones"))
 
-        commoninc = discovery.findcommonincoming(repo, remote)
+            commoninc = discovery.findcommonincoming(repo, remote)
 
-        if autoremoveincludes:
-            outgoing = discovery.findcommonoutgoing(
-                repo, remote, commoninc=commoninc
-            )
-            ui.status(_(b'looking for unused includes to remove\n'))
-            localfiles = set()
-            for n in itertools.chain(outgoing.missing, outgoing.excluded):
-                localfiles.update(repo[n].files())
-            suggestedremovals = []
-            for include in sorted(oldincludes):
-                match = narrowspec.match(repo.root, [include], oldexcludes)
-                if not any(match(f) for f in localfiles):
-                    suggestedremovals.append(include)
-            if suggestedremovals:
-                for s in suggestedremovals:
-                    ui.status(b'%s\n' % s)
-                if (
-                    ui.promptchoice(
-                        _(
-                            b'remove these unused includes (yn)?'
-                            b'$$ &Yes $$ &No'
+            if autoremoveincludes:
+                outgoing = discovery.findcommonoutgoing(
+                    repo, remote, commoninc=commoninc
+                )
+                ui.status(_(b'looking for unused includes to remove\n'))
+                localfiles = set()
+                for n in itertools.chain(outgoing.missing, outgoing.excluded):
+                    localfiles.update(repo[n].files())
+                suggestedremovals = []
+                for include in sorted(oldincludes):
+                    match = narrowspec.match(repo.root, [include], oldexcludes)
+                    if not any(match(f) for f in localfiles):
+                        suggestedremovals.append(include)
+                if suggestedremovals:
+                    for s in suggestedremovals:
+                        ui.status(b'%s\n' % s)
+                    if (
+                        ui.promptchoice(
+                            _(
+                                b'remove these unused includes (yn)?'
+                                b'$$ &Yes $$ &No'
+                            )
                         )
-                    )
-                    == 0
-                ):
-                    removedincludes.update(suggestedremovals)
-                    narrowing = True
-            else:
-                ui.status(_(b'found no unused includes\n'))
+                        == 0
+                    ):
+                        removedincludes.update(suggestedremovals)
+                        narrowing = True
+                else:
+                    ui.status(_(b'found no unused includes\n'))
 
-        if narrowing:
-            newincludes = oldincludes - removedincludes
-            newexcludes = oldexcludes | addedexcludes
-            _narrow(
-                ui,
-                repo,
-                remote,
-                commoninc,
-                oldincludes,
-                oldexcludes,
-                newincludes,
-                newexcludes,
-                opts[b'force_delete_local_changes'],
-            )
-            # _narrow() updated the narrowspec and _widen() below needs to
-            # use the updated values as its base (otherwise removed includes
-            # and addedexcludes will be lost in the resulting narrowspec)
-            oldincludes = newincludes
-            oldexcludes = newexcludes
+            if narrowing:
+                newincludes = oldincludes - removedincludes
+                newexcludes = oldexcludes | addedexcludes
+                _narrow(
+                    ui,
+                    repo,
+                    remote,
+                    commoninc,
+                    oldincludes,
+                    oldexcludes,
+                    newincludes,
+                    newexcludes,
+                    opts[b'force_delete_local_changes'],
+                    opts[b'backup'],
+                )
+                # _narrow() updated the narrowspec and _widen() below needs to
+                # use the updated values as its base (otherwise removed includes
+                # and addedexcludes will be lost in the resulting narrowspec)
+                oldincludes = newincludes
+                oldexcludes = newexcludes
 
-        if widening:
-            newincludes = oldincludes | addedincludes
-            newexcludes = oldexcludes - removedexcludes
-            _widen(
-                ui,
-                repo,
-                remote,
-                commoninc,
-                oldincludes,
-                oldexcludes,
-                newincludes,
-                newexcludes,
-            )
+            if widening:
+                newincludes = oldincludes | addedincludes
+                newexcludes = oldexcludes - removedexcludes
+                _widen(
+                    ui,
+                    repo,
+                    remote,
+                    commoninc,
+                    oldincludes,
+                    oldexcludes,
+                    newincludes,
+                    newexcludes,
+                )
+        finally:
+            remote.close()
 
     return 0
--- a/hgext/purge.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/purge.py	Thu Mar 18 18:24:59 2021 -0400
@@ -22,115 +22,11 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.
 
-'''command to delete untracked files from the working directory'''
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
-    cmdutil,
-    merge as mergemod,
-    pycompat,
-    registrar,
-    scmutil,
-)
-
-cmdtable = {}
-command = registrar.command(cmdtable)
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = b'ships-with-hg-core'
-
-
-@command(
-    b'purge|clean',
-    [
-        (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
-        (b'', b'all', None, _(b'purge ignored files too')),
-        (b'i', b'ignored', None, _(b'purge only ignored files')),
-        (b'', b'dirs', None, _(b'purge empty directories')),
-        (b'', b'files', None, _(b'purge files')),
-        (b'p', b'print', None, _(b'print filenames instead of deleting them')),
-        (
-            b'0',
-            b'print0',
-            None,
-            _(
-                b'end filenames with NUL, for use with xargs'
-                b' (implies -p/--print)'
-            ),
-        ),
-    ]
-    + cmdutil.walkopts,
-    _(b'hg purge [OPTION]... [DIR]...'),
-    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
-)
-def purge(ui, repo, *dirs, **opts):
-    """removes files not tracked by Mercurial
-
-    Delete files not known to Mercurial. This is useful to test local
-    and uncommitted changes in an otherwise-clean source tree.
-
-    This means that purge will delete the following by default:
-
-    - Unknown files: files marked with "?" by :hg:`status`
-    - Empty directories: in fact Mercurial ignores directories unless
-      they contain files under source control management
+'''command to delete untracked files from the working directory (DEPRECATED)
 
-    But it will leave untouched:
-
-    - Modified and unmodified tracked files
-    - Ignored files (unless -i or --all is specified)
-    - New files added to the repository (with :hg:`add`)
-
-    The --files and --dirs options can be used to direct purge to delete
-    only files, only directories, or both. If neither option is given,
-    both will be deleted.
-
-    If directories are given on the command line, only files in these
-    directories are considered.
-
-    Be careful with purge, as you could irreversibly delete some files
-    you forgot to add to the repository. If you only want to print the
-    list of files that this program would delete, use the --print
-    option.
-    """
-    opts = pycompat.byteskwargs(opts)
-    cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+The functionality of this extension has been included in core Mercurial since
+version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is now
+the default, unless the extension is enabled for backward compatibility.
+'''
 
-    act = not opts.get(b'print')
-    eol = b'\n'
-    if opts.get(b'print0'):
-        eol = b'\0'
-        act = False  # --print0 implies --print
-    if opts.get(b'all', False):
-        ignored = True
-        unknown = True
-    else:
-        ignored = opts.get(b'ignored', False)
-        unknown = not ignored
-
-    removefiles = opts.get(b'files')
-    removedirs = opts.get(b'dirs')
-
-    if not removefiles and not removedirs:
-        removefiles = True
-        removedirs = True
-
-    match = scmutil.match(repo[None], dirs, opts)
-
-    paths = mergemod.purge(
-        repo,
-        match,
-        unknown=unknown,
-        ignored=ignored,
-        removeemptydirs=removedirs,
-        removefiles=removefiles,
-        abortonerror=opts.get(b'abort_on_err'),
-        noop=not act,
-    )
-
-    for path in paths:
-        if not act:
-            ui.write(b'%s%s' % (path, eol))
+# This empty extension looks pointless, but core Mercurial checks if it's loaded
+# to implement the slightly different behavior documented above.
--- a/hgext/rebase.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/rebase.py	Thu Mar 18 18:24:59 2021 -0400
@@ -67,6 +67,14 @@
 
 cmdtable = {}
 command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+configitem(
+    b'devel',
+    b'rebase.force-in-memory-merge',
+    default=False,
+)
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -136,7 +144,7 @@
         return smartset.baseset()
     dests = destutil.orphanpossibledestination(repo, src)
     if len(dests) > 1:
-        raise error.Abort(
+        raise error.StateError(
             _(b"ambiguous automatic rebase: %r could end up on any of %r")
             % (src, dests)
         )
@@ -416,7 +424,7 @@
         if self.collapsef:
             dests = set(self.destmap.values())
             if len(dests) != 1:
-                raise error.Abort(
+                raise error.InputError(
                     _(b'--collapse does not work with multiple destinations')
                 )
             destrev = next(iter(dests))
@@ -461,12 +469,12 @@
                 for rev in self.state:
                     branches.add(repo[rev].branch())
                     if len(branches) > 1:
-                        raise error.Abort(
+                        raise error.InputError(
                             _(b'cannot collapse multiple named branches')
                         )
 
         # Calculate self.obsoletenotrebased
-        obsrevs = _filterobsoleterevs(self.repo, self.state)
+        obsrevs = {r for r in self.state if self.repo[r].obsolete()}
         self._handleskippingobsolete(obsrevs, self.destmap)
 
         # Keep track of the active bookmarks in order to reset them later
@@ -1085,10 +1093,10 @@
         with repo.wlock(), repo.lock():
             rbsrt.restorestatus()
             if rbsrt.collapsef:
-                raise error.Abort(_(b"cannot stop in --collapse session"))
+                raise error.StateError(_(b"cannot stop in --collapse session"))
             allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
             if not (rbsrt.keepf or allowunstable):
-                raise error.Abort(
+                raise error.StateError(
                     _(
                         b"cannot remove original changesets with"
                         b" unrebased descendants"
@@ -1112,6 +1120,8 @@
             with ui.configoverride(overrides, b'rebase'):
                 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
         except error.InMemoryMergeConflictsError:
+            if ui.configbool(b'devel', b'rebase.force-in-memory-merge'):
+                raise
             ui.warn(
                 _(
                     b'hit merge conflicts; re-running rebase without in-memory'
@@ -1210,14 +1220,16 @@
                 )
                 % help
             )
-            raise error.Abort(msg)
+            raise error.InputError(msg)
 
         if rbsrt.collapsemsg and not rbsrt.collapsef:
-            raise error.Abort(_(b'message can only be specified with collapse'))
+            raise error.InputError(
+                _(b'message can only be specified with collapse')
+            )
 
         if action:
             if rbsrt.collapsef:
-                raise error.Abort(
+                raise error.InputError(
                     _(b'cannot use collapse with continue or abort')
                 )
             if action == b'abort' and opts.get(b'tool', False):
@@ -1284,7 +1296,7 @@
         cmdutil.bailifchanged(repo)
 
     if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
-        raise error.Abort(
+        raise error.InputError(
             _(b'you must specify a destination'),
             hint=_(b'use: hg rebase -d REV'),
         )
@@ -1378,7 +1390,7 @@
             return None
 
     if wdirrev in rebaseset:
-        raise error.Abort(_(b'cannot rebase the working copy'))
+        raise error.InputError(_(b'cannot rebase the working copy'))
     rebasingwcp = repo[b'.'].rev() in rebaseset
     ui.log(
         b"rebase",
@@ -1416,7 +1428,7 @@
                 elif size == 0:
                     ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                 else:
-                    raise error.Abort(
+                    raise error.InputError(
                         _(b'rebase destination for %s is not unique') % repo[r]
                     )
 
@@ -1449,7 +1461,7 @@
         return nullrev
     if len(parents) == 1:
         return parents.pop()
-    raise error.Abort(
+    raise error.StateError(
         _(
             b'unable to collapse on top of %d, there is more '
             b'than one external parent: %s'
@@ -1649,7 +1661,7 @@
             b"to force the rebase please set "
             b"experimental.evolution.allowdivergence=True"
         )
-        raise error.Abort(msg % (b",".join(divhashes),), hint=h)
+        raise error.StateError(msg % (b",".join(divhashes),), hint=h)
 
 
 def successorrevs(unfi, rev):
@@ -1752,7 +1764,7 @@
         #    /|    # None of A and B will be changed to D and rebase fails.
         #   A B D
         if set(newps) == set(oldps) and dest not in newps:
-            raise error.Abort(
+            raise error.InputError(
                 _(
                     b'cannot rebase %d:%s without '
                     b'moving at least one of its parents'
@@ -1764,7 +1776,7 @@
     # impossible. With multi-dest, the initial check does not cover complex
     # cases since we don't have abstractions to dry-run rebase cheaply.
     if any(p != nullrev and isancestor(rev, p) for p in newps):
-        raise error.Abort(_(b'source is ancestor of destination'))
+        raise error.InputError(_(b'source is ancestor of destination'))
 
     # Check if the merge will contain unwanted changes. That may happen if
     # there are multiple special (non-changelog ancestor) merge bases, which
@@ -1826,7 +1838,7 @@
                         if revs is not None
                     )
                 )
-                raise error.Abort(
+                raise error.InputError(
                     _(b'rebasing %d:%s will include unwanted changes from %s')
                     % (rev, repo[rev], unwanteddesc)
                 )
@@ -1971,7 +1983,7 @@
             if destmap[r] not in srcset:
                 result.append(r)
         if not result:
-            raise error.Abort(_(b'source and destination form a cycle'))
+            raise error.InputError(_(b'source and destination form a cycle'))
         srcset -= set(result)
         yield result
 
@@ -1991,12 +2003,12 @@
     if b'qtip' in repo.tags():
         mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
         if set(destmap.values()) & mqapplied:
-            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
+            raise error.StateError(_(b'cannot rebase onto an applied mq patch'))
 
     # Get "cycle" error early by exhausting the generator.
     sortedsrc = list(sortsource(destmap))  # a list of sorted revs
     if not sortedsrc:
-        raise error.Abort(_(b'no matching revisions'))
+        raise error.InputError(_(b'no matching revisions'))
 
     # Only check the first batch of revisions to rebase not depending on other
     # rebaseset. This means "source is ancestor of destination" for the second
@@ -2004,7 +2016,7 @@
     # "defineparents" to do that check.
     roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
     if not roots:
-        raise error.Abort(_(b'no matching revisions'))
+        raise error.InputError(_(b'no matching revisions'))
 
     def revof(r):
         return r.rev()
@@ -2016,7 +2028,7 @@
         dest = repo[destmap[root.rev()]]
         commonbase = root.ancestor(dest)
         if commonbase == root:
-            raise error.Abort(_(b'source is ancestor of destination'))
+            raise error.InputError(_(b'source is ancestor of destination'))
         if commonbase == dest:
             wctx = repo[None]
             if dest == wctx.p1():
@@ -2109,7 +2121,7 @@
         if ui.configbool(b'commands', b'rebase.requiredest'):
             msg = _(b'rebase destination required by configuration')
             hint = _(b'use hg pull followed by hg rebase -d DEST')
-            raise error.Abort(msg, hint=hint)
+            raise error.InputError(msg, hint=hint)
 
         with repo.wlock(), repo.lock():
             if opts.get('update'):
@@ -2166,17 +2178,12 @@
                         commands.update(ui, repo)
     else:
         if opts.get('tool'):
-            raise error.Abort(_(b'--tool can only be used with --rebase'))
+            raise error.InputError(_(b'--tool can only be used with --rebase'))
         ret = orig(ui, repo, *args, **opts)
 
     return ret
 
 
-def _filterobsoleterevs(repo, revs):
-    """returns a set of the obsolete revisions in revs"""
-    return {r for r in revs if repo[r].obsolete()}
-
-
 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
     """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
 
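The Abort -> InputError/StateError changes throughout this file refine error
classification: InputError for bad arguments or revsets, StateError for a
repository state that forbids the operation. Our reading is that, with
ui.detailed-exit-code enabled, these classes map to distinct exit codes (10
and 20 respectively) instead of the generic failure code. A hedged sketch of
catching them (run_rebase is hypothetical):

    from mercurial import error

    try:
        run_rebase()              # hypothetical driver
    except error.InputError:      # e.g. 'source is ancestor of destination'
        pass                      # bad input; exit code 10 when detailed
    except error.StateError:      # e.g. 'cannot rebase onto an applied mq patch'
        pass                      # bad repo state; exit code 20 when detailed
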
--- a/hgext/releasenotes.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/releasenotes.py	Thu Mar 18 18:24:59 2021 -0400
@@ -280,7 +280,7 @@
 
     if b'.hgreleasenotes' in ctx:
         read(b'.hgreleasenotes')
-    return p[b'sections']
+    return p.items(b'sections')
 
 
 def checkadmonitions(ui, repo, directives, revs):
--- a/hgext/remotefilelog/connectionpool.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/remotefilelog/connectionpool.py	Thu Mar 18 18:24:59 2021 -0400
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 from mercurial import (
-    extensions,
     hg,
     pycompat,
     sshpeer,
@@ -43,17 +42,19 @@
 
         if conn is None:
 
-            def _cleanup(orig):
-                # close pipee first so peer.cleanup reading it won't deadlock,
-                # if there are other processes with pipeo open (i.e. us).
-                peer = orig.im_self
-                if util.safehasattr(peer, 'pipee'):
-                    peer.pipee.close()
-                return orig()
+            peer = hg.peer(self._repo.ui, {}, path)
+            if util.safehasattr(peer, '_cleanup'):
 
-            peer = hg.peer(self._repo.ui, {}, path)
-            if util.safehasattr(peer, 'cleanup'):
-                extensions.wrapfunction(peer, b'cleanup', _cleanup)
+                class mypeer(peer.__class__):
+                    def _cleanup(self, warn=None):
+                        # close pipee first so peer.cleanup reading it won't
+                        # deadlock, if there are other processes with pipeo
+                        # open (i.e. us).
+                        if util.safehasattr(self, 'pipee'):
+                            self.pipee.close()
+                        return super(mypeer, self)._cleanup()
+
+                peer.__class__ = mypeer
 
             conn = connection(pathpool, peer)
 
--- a/hgext/remotefilelog/remotefilelog.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/remotefilelog/remotefilelog.py	Thu Mar 18 18:24:59 2021 -0400
@@ -155,12 +155,12 @@
         # text passed to "addrevision" includes hg filelog metadata header
         if node is None:
             node = storageutil.hashrevisionsha1(text, p1, p2)
-        if sidedata is None:
-            sidedata = {}
 
         meta, metaoffset = storageutil.parsemeta(text)
         rawtext, validatehash = flagutil.processflagswrite(
-            self, text, flags, sidedata=sidedata
+            self,
+            text,
+            flags,
         )
         return self.addrawrevision(
             rawtext,
@@ -306,6 +306,7 @@
         assumehaveparentrevisions=False,
         deltaprevious=False,
         deltamode=None,
+        sidedata_helpers=None,
     ):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
@@ -333,6 +334,8 @@
                 baserevisionsize=None,
                 revision=revision,
                 delta=delta,
+                # Sidedata is not supported yet
+                sidedata=None,
             )
 
     def revdiff(self, node1, node2):
--- a/hgext/remotefilelog/shallowbundle.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/remotefilelog/shallowbundle.py	Thu Mar 18 18:24:59 2021 -0400
@@ -67,7 +67,7 @@
             shallowcg1packer, self, nodelist, rlog, lookup, units=units
         )
 
-    def generatefiles(self, changedfiles, *args):
+    def generatefiles(self, changedfiles, *args, **kwargs):
         try:
             linknodes, commonrevs, source = args
         except ValueError:
@@ -92,7 +92,9 @@
                     [f for f in changedfiles if not repo.shallowmatch(f)]
                 )
 
-        return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
+        return super(shallowcg1packer, self).generatefiles(
+            changedfiles, *args, **kwargs
+        )
 
     def shouldaddfilegroups(self, source):
         repo = self._repo
@@ -176,9 +178,11 @@
         repo.shallowmatch = original
 
 
-def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
+def addchangegroupfiles(
+    orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs
+):
     if not shallowutil.isenabled(repo):
-        return orig(repo, source, revmap, trp, expectedfiles, *args)
+        return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs)
 
     newfiles = 0
     visited = set()
@@ -272,7 +276,7 @@
 
         revisiondata = revisiondatas[(f, node)]
         # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
-        node, p1, p2, linknode, deltabase, delta, flags = revisiondata
+        node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata
 
         if not available(f, node, f, deltabase):
             continue
--- a/hgext/split.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/split.py	Thu Mar 18 18:24:59 2021 -0400
@@ -27,6 +27,7 @@
     revsetlang,
     rewriteutil,
     scmutil,
+    util,
 )
 
 # allow people to use split without explicitly enabling rebase extension
@@ -69,57 +70,62 @@
     if opts.get(b'rev'):
         revlist.append(opts.get(b'rev'))
     revlist.extend(revs)
-    with repo.wlock(), repo.lock(), repo.transaction(b'split') as tr:
-        revs = scmutil.revrange(repo, revlist or [b'.'])
-        if len(revs) > 1:
-            raise error.InputError(_(b'cannot split multiple revisions'))
+    with repo.wlock(), repo.lock():
+        tr = repo.transaction(b'split')
+        # If the rebase somehow runs into conflicts, make sure
+        # we close the transaction so the user can continue it.
+        with util.acceptintervention(tr):
+            revs = scmutil.revrange(repo, revlist or [b'.'])
+            if len(revs) > 1:
+                raise error.InputError(_(b'cannot split multiple revisions'))
 
-        rev = revs.first()
-        ctx = repo[rev]
-        # Handle nullid specially here (instead of leaving for precheck()
-        # below) so we get a nicer message and error code.
-        if rev is None or ctx.node() == nullid:
-            ui.status(_(b'nothing to split\n'))
-            return 1
-        if ctx.node() is None:
-            raise error.InputError(_(b'cannot split working directory'))
+            rev = revs.first()
+            ctx = repo[rev]
+            # Handle nullid specially here (instead of leaving for precheck()
+            # below) so we get a nicer message and error code.
+            if rev is None or ctx.node() == nullid:
+                ui.status(_(b'nothing to split\n'))
+                return 1
+            if ctx.node() is None:
+                raise error.InputError(_(b'cannot split working directory'))
 
-        if opts.get(b'rebase'):
-            # Skip obsoleted descendants and their descendants so the rebase
-            # won't cause conflicts for sure.
-            descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
-            torebase = list(
-                repo.revs(
-                    b'%ld - (%ld & obsolete())::', descendants, descendants
+            if opts.get(b'rebase'):
+                # Skip obsoleted descendants and their descendants so the rebase
+                # won't cause conflicts for sure.
+                descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
+                torebase = list(
+                    repo.revs(
+                        b'%ld - (%ld & obsolete())::', descendants, descendants
+                    )
                 )
-            )
-        else:
-            torebase = []
-        rewriteutil.precheck(repo, [rev] + torebase, b'split')
+            else:
+                torebase = []
+            rewriteutil.precheck(repo, [rev] + torebase, b'split')
 
-        if len(ctx.parents()) > 1:
-            raise error.InputError(_(b'cannot split a merge changeset'))
+            if len(ctx.parents()) > 1:
+                raise error.InputError(_(b'cannot split a merge changeset'))
 
-        cmdutil.bailifchanged(repo)
+            cmdutil.bailifchanged(repo)
 
-        # Deactivate bookmark temporarily so it won't get moved unintentionally
-        bname = repo._activebookmark
-        if bname and repo._bookmarks[bname] != ctx.node():
-            bookmarks.deactivate(repo)
+            # Deactivate bookmark temporarily so it won't get moved
+            # unintentionally
+            bname = repo._activebookmark
+            if bname and repo._bookmarks[bname] != ctx.node():
+                bookmarks.deactivate(repo)
 
-        wnode = repo[b'.'].node()
-        top = None
-        try:
-            top = dosplit(ui, repo, tr, ctx, opts)
-        finally:
-            # top is None: split failed, need update --clean recovery.
-            # wnode == ctx.node(): wnode split, no need to update.
-            if top is None or wnode != ctx.node():
-                hg.clean(repo, wnode, show_stats=False)
-            if bname:
-                bookmarks.activate(repo, bname)
-        if torebase and top:
-            dorebase(ui, repo, torebase, top)
+            wnode = repo[b'.'].node()
+            top = None
+            try:
+                top = dosplit(ui, repo, tr, ctx, opts)
+            finally:
+                # top is None: split failed, need update --clean recovery.
+                # wnode == ctx.node(): wnode split, no need to update.
+                if top is None or wnode != ctx.node():
+                    hg.clean(repo, wnode, show_stats=False)
+                if bname:
+                    bookmarks.activate(repo, bname)
+            if torebase and top:
+                dorebase(ui, repo, torebase, top)
 
 
 def dosplit(ui, repo, tr, ctx, opts):
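
The split hunk above stops using the transaction as a plain context manager
so that a conflict during the implied rebase does not roll everything back.
A simplified, self-contained sketch of the semantics relied on from
mercurial.util.acceptintervention (the real helper differs in detail):

    import contextlib

    class InterventionRequired(Exception):
        """stand-in for mercurial.error.InterventionRequired"""

    @contextlib.contextmanager
    def acceptintervention(tr):
        # close the transaction on success *and* when the user must
        # intervene (e.g. merge conflicts), so '--continue' can resume;
        # any other exception still aborts the transaction on release.
        try:
            yield tr
            tr.close()
        except InterventionRequired:
            tr.close()
            raise
        finally:
            tr.release()
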
--- a/hgext/sqlitestore.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/sqlitestore.py	Thu Mar 18 18:24:59 2021 -0400
@@ -54,6 +54,7 @@
 from mercurial.node import (
     nullid,
     nullrev,
+    sha1nodeconstants,
     short,
 )
 from mercurial.thirdparty import attr
@@ -288,6 +289,7 @@
     baserevisionsize = attr.ib()
     revision = attr.ib()
     delta = attr.ib()
+    sidedata = attr.ib()
     linknode = attr.ib(default=None)
 
 
@@ -304,6 +306,7 @@
     """Implements storage for an individual tracked path."""
 
     def __init__(self, db, path, compression):
+        self.nullid = sha1nodeconstants.nullid
         self._db = db
         self._path = path
 
@@ -586,6 +589,7 @@
         revisiondata=False,
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
+        sidedata_helpers=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
@@ -624,6 +628,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
+            sidedata_helpers=sidedata_helpers,
         ):
 
             yield delta
@@ -636,7 +641,8 @@
         if meta or filedata.startswith(b'\x01\n'):
             filedata = storageutil.packmeta(meta, filedata)
 
-        return self.addrevision(filedata, transaction, linkrev, p1, p2)
+        rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
+        return self.node(rev)
 
     def addrevision(
         self,
@@ -658,15 +664,16 @@
         if validatehash:
             self._checkhash(revisiondata, node, p1, p2)
 
-        if node in self._nodetorev:
-            return node
+        rev = self._nodetorev.get(node)
+        if rev is not None:
+            return rev
 
-        node = self._addrawrevision(
+        rev = self._addrawrevision(
             node, revisiondata, transaction, linkrev, p1, p2
         )
 
         self._revisioncache[node] = revisiondata
-        return node
+        return rev
 
     def addgroup(
         self,
@@ -679,7 +686,16 @@
     ):
         empty = True
 
-        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
+        for (
+            node,
+            p1,
+            p2,
+            linknode,
+            deltabase,
+            delta,
+            wireflags,
+            sidedata,
+        ) in deltas:
             storeflags = 0
 
             if wireflags & repository.REVISION_FLAG_CENSORED:
@@ -741,7 +757,7 @@
                     )
 
                 if duplicaterevisioncb:
-                    duplicaterevisioncb(self, node)
+                    duplicaterevisioncb(self, self.rev(node))
                 empty = False
                 continue
 
@@ -752,7 +768,7 @@
                 text = None
                 storedelta = (deltabase, delta)
 
-            self._addrawrevision(
+            rev = self._addrawrevision(
                 node,
                 text,
                 transaction,
@@ -764,7 +780,7 @@
             )
 
             if addrevisioncb:
-                addrevisioncb(self, node)
+                addrevisioncb(self, rev)
             empty = False
 
         return not empty
@@ -897,6 +913,10 @@
     def files(self):
         return []
 
+    def sidedata(self, nodeorrev, _df=None):
+        # Not supported for now
+        return {}
+
     def storageinfo(
         self,
         exclusivefiles=False,
@@ -1079,7 +1099,7 @@
         self._revtonode[rev] = node
         self._revisions[node] = entry
 
-        return node
+        return rev
 
 
 class sqliterepository(localrepo.localrepository):
--- a/hgext/uncommit.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/hgext/uncommit.py	Thu Mar 18 18:24:59 2021 -0400
@@ -175,7 +175,7 @@
         old = repo[b'.']
         rewriteutil.precheck(repo, [old.rev()], b'uncommit')
         if len(old.parents()) > 1:
-            raise error.Abort(_(b"cannot uncommit merge changeset"))
+            raise error.InputError(_(b"cannot uncommit merge changeset"))
 
         match = scmutil.match(old, pats, opts)
 
@@ -202,7 +202,7 @@
                 else:
                     hint = _(b"file does not exist")
 
-                raise error.Abort(
+                raise error.InputError(
                     _(b'cannot uncommit "%s"') % scmutil.getuipathfn(repo)(f),
                     hint=hint,
                 )
@@ -280,7 +280,7 @@
         markers = list(predecessormarkers(curctx))
         if len(markers) != 1:
             e = _(b"changeset must have one predecessor, found %i predecessors")
-            raise error.Abort(e % len(markers))
+            raise error.InputError(e % len(markers))
 
         prednode = markers[0].prednode()
         predctx = unfi[prednode]
--- a/mercurial/bitmanipulation.h	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/bitmanipulation.h	Thu Mar 18 18:24:59 2021 -0400
@@ -5,6 +5,18 @@
 
 #include "compat.h"
 
+/* Reads a 64 bit integer from big-endian bytes. Assumes that the data is long
+ enough */
+static inline uint64_t getbe64(const char *c)
+{
+	const unsigned char *d = (const unsigned char *)c;
+
+	return ((((uint64_t)d[0]) << 56) | (((uint64_t)d[1]) << 48) |
+	        (((uint64_t)d[2]) << 40) | (((uint64_t)d[3]) << 32) |
+	        (((uint64_t)d[4]) << 24) | (((uint64_t)d[5]) << 16) |
+	        (((uint64_t)d[6]) << 8) | (d[7]));
+}
+
 static inline uint32_t getbe32(const char *c)
 {
 	const unsigned char *d = (const unsigned char *)c;
@@ -27,6 +39,20 @@
 	return ((d[0] << 8) | (d[1]));
 }
 
+/* Writes a 64 bit integer to bytes in a big-endian format.
+ Assumes that the buffer is long enough */
+static inline void putbe64(uint64_t x, char *c)
+{
+	c[0] = (x >> 56) & 0xff;
+	c[1] = (x >> 48) & 0xff;
+	c[2] = (x >> 40) & 0xff;
+	c[3] = (x >> 32) & 0xff;
+	c[4] = (x >> 24) & 0xff;
+	c[5] = (x >> 16) & 0xff;
+	c[6] = (x >> 8) & 0xff;
+	c[7] = (x)&0xff;
+}
+
 static inline void putbe32(uint32_t x, char *c)
 {
 	c[0] = (x >> 24) & 0xff;
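
The new 64-bit helpers mirror the existing 32- and 16-bit ones; in Python
terms they are struct's big-endian Q format, which makes a quick cross-check
of the shifts easy:

    import struct

    x = 0x0123456789ABCDEF
    buf = struct.pack('>Q', x)                # bytes putbe64() would write
    assert buf == bytes([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF])
    assert struct.unpack('>Q', buf)[0] == x   # value getbe64() reads back
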
--- a/mercurial/bookmarks.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/bookmarks.py	Thu Mar 18 18:24:59 2021 -0400
@@ -623,7 +623,7 @@
 _binaryentry = struct.Struct(b'>20sH')
 
 
-def binaryencode(bookmarks):
+def binaryencode(repo, bookmarks):
     """encode a '(bookmark, node)' iterable into a binary stream
 
     the binary format is:
@@ -645,7 +645,7 @@
     return b''.join(binarydata)
 
 
-def binarydecode(stream):
+def binarydecode(repo, stream):
     """decode a binary stream into an '(bookmark, node)' iterable
 
     the binary format is:
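
For context on the _binaryentry format used above (a 20-byte node followed
by a big-endian 16-bit bookmark-name length), a stdlib sketch of packing one
entry with made-up values:

    import struct

    entry = struct.Struct('>20sH')        # 20-byte node + name length
    node = b'\x11' * 20                   # illustrative node hash
    name = b'feature-bookmark'
    record = entry.pack(node, len(name)) + name
    assert len(record) == 20 + 2 + len(name)
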
--- a/mercurial/branchmap.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/branchmap.py	Thu Mar 18 18:24:59 2021 -0400
@@ -39,6 +39,7 @@
         Tuple,
         Union,
     )
+    from . import localrepo
 
     assert any(
         (
@@ -51,6 +52,7 @@
             Set,
             Tuple,
             Union,
+            localrepo,
         )
     )
 
@@ -97,7 +99,7 @@
                 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
             else:
                 # nothing to fall back on, start empty.
-                bcache = branchcache()
+                bcache = branchcache(repo)
 
         revs.extend(cl.revs(start=bcache.tiprev + 1))
         if revs:
@@ -129,6 +131,7 @@
         if rbheads:
             rtiprev = max((int(clrev(node)) for node in rbheads))
             cache = branchcache(
+                repo,
                 remotebranchmap,
                 repo[rtiprev].node(),
                 rtiprev,
@@ -184,6 +187,7 @@
 
     def __init__(
         self,
+        repo,
         entries=(),
         tipnode=nullid,
         tiprev=nullrev,
@@ -191,10 +195,11 @@
         closednodes=None,
         hasnode=None,
     ):
-        # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes,  int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
+        # type: (localrepo.localrepository, Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes,  int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
         """hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
+        self._repo = repo
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
@@ -280,6 +285,7 @@
             if len(cachekey) > 2:
                 filteredhash = bin(cachekey[2])
             bcache = cls(
+                repo,
                 tipnode=last,
                 tiprev=lrev,
                 filteredhash=filteredhash,
@@ -386,6 +392,7 @@
     def copy(self):
         """return an deep copy of the branchcache object"""
         return type(self)(
+            self._repo,
             self._entries,
             self.tipnode,
             self.tiprev,
@@ -564,6 +571,7 @@
 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
 _rbcrecfmt = b'>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
+_rbcmininc = 64 * _rbcrecsize
 _rbcnodelen = 4
 _rbcbranchidxmask = 0x7FFFFFFF
 _rbccloseflag = 0x80000000
@@ -703,8 +711,10 @@
         self._setcachedata(rev, reponode, branchidx)
         return b, close
 
-    def setdata(self, branch, rev, node, close):
+    def setdata(self, rev, changelogrevision):
         """add new data information to the cache"""
+        branch, close = changelogrevision.branchinfo
+
         if branch in self._namesreverse:
             branchidx = self._namesreverse[branch]
         else:
@@ -713,7 +723,7 @@
             self._namesreverse[branch] = branchidx
         if close:
             branchidx |= _rbccloseflag
-        self._setcachedata(rev, node, branchidx)
+        self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
         # If no cache data were readable (non exists, bad permission, etc)
         # the cache was bypassing itself by setting:
         #
@@ -728,11 +738,15 @@
         if rev == nullrev:
             return
         rbcrevidx = rev * _rbcrecsize
-        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
-            self._rbcrevs.extend(
-                b'\0'
-                * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
-            )
+        requiredsize = rbcrevidx + _rbcrecsize
+        rbccur = len(self._rbcrevs)
+        if rbccur < requiredsize:
+            # bytearray doesn't allocate extra space, at least in Python 3.7.
+            # When multiple changesets are added in a row, precise resize would
+            # result in quadratic complexity. Overallocate to compensate by
+            # using the classic doubling technique for dynamic arrays instead.
+            # If there was a gap in the map before, less space will be reserved.
+            self._rbcrevs.extend(b'\0' * max(_rbcmininc, requiredsize))
         pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)
 
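A minimal illustration of the overallocation strategy introduced above:
growing the bytearray *by* at least the currently required size (instead of
*to* exactly that size) at least doubles the buffer each time, keeping n
sequential appends amortized O(n) instead of O(n^2). Names and sizes here
are illustrative:

    _RECSIZE = 8                 # illustrative record size
    _MININC = 64 * _RECSIZE      # same idea as _rbcmininc

    def ensure_capacity(buf, rev):
        required = (rev + 1) * _RECSIZE
        if len(buf) < required:
            # extend *by* max(..., required), not *to* required
            buf.extend(b'\0' * max(_MININC, required))

    buf = bytearray()
    for rev in range(1000):
        ensure_capacity(buf, rev)   # only a handful of actual resizes
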
--- a/mercurial/bundle2.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/bundle2.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1598,7 +1598,6 @@
     b'digests': tuple(sorted(util.DIGESTS.keys())),
     b'remote-changegroup': (b'http', b'https'),
     b'hgtagsfnodes': (),
-    b'rev-branch-cache': (),
     b'phases': (b'heads',),
     b'stream': (b'v2',),
 }
@@ -1643,6 +1642,9 @@
     # Else always advertise support on client, because payload support
     # should always be advertised.
 
+    # b'rev-branch-cache' is no longer advertised, but still supported
+    # for legacy clients.
+
     return caps
 
 
@@ -1769,7 +1771,7 @@
     for node in outgoing.ancestorsof:
         # Don't compute missing, as this may slow down serving.
         fnode = cache.getfnode(node, computemissing=False)
-        if fnode is not None:
+        if fnode:
             chunks.extend([node, fnode])
 
     if chunks:
@@ -1810,6 +1812,28 @@
     return params
 
 
+def format_remote_wanted_sidedata(repo):
+    """Formats a repo's wanted sidedata categories into a bytestring for
+    capabilities exchange."""
+    wanted = b""
+    if repo._wanted_sidedata:
+        wanted = b','.join(
+            pycompat.bytestr(c) for c in sorted(repo._wanted_sidedata)
+        )
+    return wanted
+
+
+def read_remote_wanted_sidedata(remote):
+    sidedata_categories = remote.capable(b'exp-wanted-sidedata')
+    return read_wanted_sidedata(sidedata_categories)
+
+
+def read_wanted_sidedata(formatted):
+    if formatted:
+        return set(formatted.split(b','))
+    return set()
+
+
 def addpartbundlestream2(bundler, repo, **kwargs):
     if not kwargs.get('stream', False):
         return
@@ -1955,6 +1979,7 @@
         b'version',
         b'nbchanges',
         b'exp-sidedata',
+        b'exp-wanted-sidedata',
         b'treemanifest',
         b'targetphase',
     ),
@@ -1997,11 +2022,15 @@
     targetphase = inpart.params.get(b'targetphase')
     if targetphase is not None:
         extrakwargs['targetphase'] = int(targetphase)
+
+    remote_sidedata = inpart.params.get(b'exp-wanted-sidedata')
+    extrakwargs['sidedata_categories'] = read_wanted_sidedata(remote_sidedata)
+
     ret = _processchangegroup(
         op,
         cg,
         tr,
-        b'bundle2',
+        op.source,
         b'bundle2',
         expectedtotal=nbchangesets,
         **extrakwargs
@@ -2083,7 +2112,7 @@
         raise error.Abort(
             _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
         )
-    ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
+    ret = _processchangegroup(op, cg, tr, op.source, b'bundle2')
     if op.reply is not None:
         # This is definitely not the final form of this
         # return. But one need to start somewhere.
@@ -2117,7 +2146,7 @@
     contains binary encoded (bookmark, node) tuple. If the local state does
     not marks the one in the part, a PushRaced exception is raised
     """
-    bookdata = bookmarks.binarydecode(inpart)
+    bookdata = bookmarks.binarydecode(op.repo, inpart)
 
     msgstandard = (
         b'remote repository changed while pushing - please try again '
@@ -2347,7 +2376,7 @@
     When mode is 'records', the information is recorded into the 'bookmarks'
     records of the bundle operation. This behavior is suitable for pulling.
     """
-    changes = bookmarks.binarydecode(inpart)
+    changes = bookmarks.binarydecode(op.repo, inpart)
 
     pushkeycompat = op.repo.ui.configbool(
         b'server', b'bookmarks-pushkey-compat'
@@ -2478,35 +2507,10 @@
 
 @parthandler(b'cache:rev-branch-cache')
 def handlerbc(op, inpart):
-    """receive a rev-branch-cache payload and update the local cache
-
-    The payload is a series of data related to each branch
-
-    1) branch name length
-    2) number of open heads
-    3) number of closed heads
-    4) open heads nodes
-    5) closed heads nodes
-    """
-    total = 0
-    rawheader = inpart.read(rbcstruct.size)
-    cache = op.repo.revbranchcache()
-    cl = op.repo.unfiltered().changelog
-    while rawheader:
-        header = rbcstruct.unpack(rawheader)
-        total += header[1] + header[2]
-        utf8branch = inpart.read(header[0])
-        branch = encoding.tolocal(utf8branch)
-        for x in pycompat.xrange(header[1]):
-            node = inpart.read(20)
-            rev = cl.rev(node)
-            cache.setdata(branch, rev, node, False)
-        for x in pycompat.xrange(header[2]):
-            node = inpart.read(20)
-            rev = cl.rev(node)
-            cache.setdata(branch, rev, node, True)
-        rawheader = inpart.read(rbcstruct.size)
-    cache.write()
+    """Legacy part, ignored for compatibility with bundles from or
+    for Mercurial before 5.7. Newer Mercurial computes the cache
+    efficiently enough during unbundling that the additional transfer
+    is unnecessary."""
 
 
 @parthandler(b'pushvars')
@@ -2561,8 +2565,6 @@
     for r in repo.revs(b"::%ln", common):
         commonnodes.add(cl.node(r))
     if commonnodes:
-        # XXX: we should only send the filelogs (and treemanifest). user
-        # already has the changelog and manifest
         packer = changegroup.getbundler(
             cgversion,
             repo,
@@ -2584,5 +2586,7 @@
             part.addparam(b'treemanifest', b'1')
         if b'exp-sidedata-flag' in repo.requirements:
             part.addparam(b'exp-sidedata', b'1')
+            wanted = format_remote_wanted_sidedata(repo)
+            part.addparam(b'exp-wanted-sidedata', wanted)
 
     return bundler
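
The exp-wanted-sidedata value exchanged above is nothing more than a sorted,
comma-separated bytestring of category names. A standalone round-trip of that
encoding, mirroring format_remote_wanted_sidedata and read_wanted_sidedata
without the repo/peer plumbing (the category names are made up):

    def format_wanted(categories):
        if not categories:
            return b''
        return b','.join(sorted(categories))

    def read_wanted(formatted):
        if formatted:
            return set(formatted.split(b','))
        return set()

    assert format_wanted({b'filesize', b'copies'}) == b'copies,filesize'
    assert read_wanted(b'copies,filesize') == {b'copies', b'filesize'}
    assert read_wanted(b'') == set()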
--- a/mercurial/bundlecaches.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/bundlecaches.py	Thu Mar 18 18:24:59 2021 -0400
@@ -9,6 +9,7 @@
 
 from . import (
     error,
+    requirements as requirementsmod,
     sslutil,
     util,
 )
@@ -164,7 +165,7 @@
             compression = spec
             version = b'v1'
             # Generaldelta repos require v2.
-            if b'generaldelta' in repo.requirements:
+            if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
                 version = b'v2'
             # Modern compression engines require v2.
             if compression not in _bundlespecv1compengines:
--- a/mercurial/bundlerepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/bundlerepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -61,7 +61,7 @@
         self.repotiprev = n - 1
         self.bundlerevs = set()  # used by 'bundle()' revset expression
         for deltadata in cgunpacker.deltaiter():
-            node, p1, p2, cs, deltabase, delta, flags = deltadata
+            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
 
             size = len(delta)
             start = cgunpacker.tell() - size
@@ -175,9 +175,15 @@
 
 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(
-        self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
+        self,
+        nodeconstants,
+        opener,
+        cgunpacker,
+        linkmapper,
+        dirlogstarts=None,
+        dir=b'',
     ):
-        manifest.manifestrevlog.__init__(self, opener, tree=dir)
+        manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
         bundlerevlog.__init__(
             self, opener, self.indexfile, cgunpacker, linkmapper
         )
@@ -192,6 +198,7 @@
         if d in self._dirlogstarts:
             self.bundle.seek(self._dirlogstarts[d])
             return bundlemanifest(
+                self.nodeconstants,
                 self.opener,
                 self.bundle,
                 self._linkmapper,
@@ -368,7 +375,9 @@
         # consume the header if it exists
         self._cgunpacker.manifestheader()
         linkmapper = self.unfiltered().changelog.rev
-        rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
+        rootstore = bundlemanifest(
+            self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
+        )
         self.filestart = self._cgunpacker.tell()
 
         return manifest.manifestlog(
--- a/mercurial/cext/osutil.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/cext/osutil.c	Thu Mar 18 18:24:59 2021 -0400
@@ -119,7 +119,7 @@
 
 static void listdir_stat_dealloc(PyObject *o)
 {
-	o->ob_type->tp_free(o);
+	Py_TYPE(o)->tp_free(o);
 }
 
 static PyObject *listdir_stat_getitem(PyObject *self, PyObject *key)
--- a/mercurial/cext/parsers.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/cext/parsers.c	Thu Mar 18 18:24:59 2021 -0400
@@ -638,7 +638,7 @@
 PyObject *encodedir(PyObject *self, PyObject *args);
 PyObject *pathencode(PyObject *self, PyObject *args);
 PyObject *lowerencode(PyObject *self, PyObject *args);
-PyObject *parse_index2(PyObject *self, PyObject *args);
+PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);
 
 static PyMethodDef methods[] = {
     {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
@@ -646,7 +646,8 @@
      "create a set containing non-normal and other parent entries of given "
      "dirstate\n"},
     {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
-    {"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
+    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
+     "parse a revlog index\n"},
     {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
     {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
     {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
--- a/mercurial/cext/pathencode.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/cext/pathencode.c	Thu Mar 18 18:24:59 2021 -0400
@@ -21,6 +21,7 @@
 #include <ctype.h>
 #include <stdlib.h>
 #include <string.h>
+#include "pythoncapi_compat.h"
 
 #include "util.h"
 
@@ -678,7 +679,7 @@
 	}
 
 	assert(PyBytes_Check(ret));
-	Py_SIZE(ret) = destlen;
+	Py_SET_SIZE(ret, destlen);
 
 	return ret;
 }
--- a/mercurial/cext/revlog.c	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/cext/revlog.c	Thu Mar 18 18:24:59 2021 -0400
@@ -98,6 +98,7 @@
 	int ntlookups;          /* # lookups */
 	int ntmisses;           /* # lookups that miss the cache */
 	int inlined;
+	long hdrsize; /* size of index headers. Differs in v1 vs. v2 format */
 };
 
 static Py_ssize_t index_length(const indexObject *self)
@@ -113,14 +114,21 @@
 static int index_find_node(indexObject *self, const char *node);
 
 #if LONG_MAX == 0x7fffffffL
-static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
+static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
+static const char *const v2_tuple_format =
+    PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
 #else
-static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
+static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
+static const char *const v2_tuple_format =
+    PY23("kiiiiiis#ki", "kiiiiiiy#ki");
 #endif
 
 /* A RevlogNG v1 index entry is 64 bytes long. */
 static const long v1_hdrsize = 64;
 
+/* A Revlogv2 index entry is 96 bytes long. */
+static const long v2_hdrsize = 96;
+
 static void raise_revlog_error(void)
 {
 	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
@@ -157,7 +165,7 @@
 static const char *index_deref(indexObject *self, Py_ssize_t pos)
 {
 	if (pos >= self->length)
-		return self->added + (pos - self->length) * v1_hdrsize;
+		return self->added + (pos - self->length) * self->hdrsize;
 
 	if (self->inlined && pos > 0) {
 		if (self->offsets == NULL) {
@@ -174,7 +182,7 @@
 		return self->offsets[pos];
 	}
 
-	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
+	return (const char *)(self->buf.buf) + pos * self->hdrsize;
 }
 
 /*
@@ -280,8 +288,9 @@
  */
 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
 {
-	uint64_t offset_flags;
-	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
+	uint64_t offset_flags, sidedata_offset;
+	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
+	    sidedata_comp_len;
 	const char *c_node_id;
 	const char *data;
 	Py_ssize_t length = index_length(self);
@@ -320,9 +329,19 @@
 	parent_2 = getbe32(data + 28);
 	c_node_id = data + 32;
 
-	return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
-	                     base_rev, link_rev, parent_1, parent_2, c_node_id,
-	                     self->nodelen);
+	if (self->hdrsize == v1_hdrsize) {
+		return Py_BuildValue(v1_tuple_format, offset_flags, comp_len,
+		                     uncomp_len, base_rev, link_rev, parent_1,
+		                     parent_2, c_node_id, self->nodelen);
+	} else {
+		sidedata_offset = getbe64(data + 64);
+		sidedata_comp_len = getbe32(data + 72);
+
+		return Py_BuildValue(v2_tuple_format, offset_flags, comp_len,
+		                     uncomp_len, base_rev, link_rev, parent_1,
+		                     parent_2, c_node_id, self->nodelen,
+		                     sidedata_offset, sidedata_comp_len);
+	}
 }
 
 /*
@@ -373,18 +392,30 @@
 
 static PyObject *index_append(indexObject *self, PyObject *obj)
 {
-	uint64_t offset_flags;
+	uint64_t offset_flags, sidedata_offset;
 	int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
-	Py_ssize_t c_node_id_len;
+	Py_ssize_t c_node_id_len, sidedata_comp_len;
 	const char *c_node_id;
 	char *data;
 
-	if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
-	                      &uncomp_len, &base_rev, &link_rev, &parent_1,
-	                      &parent_2, &c_node_id, &c_node_id_len)) {
-		PyErr_SetString(PyExc_TypeError, "8-tuple required");
-		return NULL;
+	if (self->hdrsize == v1_hdrsize) {
+		if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags,
+		                      &comp_len, &uncomp_len, &base_rev,
+		                      &link_rev, &parent_1, &parent_2,
+		                      &c_node_id, &c_node_id_len)) {
+			PyErr_SetString(PyExc_TypeError, "8-tuple required");
+			return NULL;
+		}
+	} else {
+		if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags,
+		        &comp_len, &uncomp_len, &base_rev, &link_rev,
+		        &parent_1, &parent_2, &c_node_id, &c_node_id_len,
+		        &sidedata_offset, &sidedata_comp_len)) {
+			PyErr_SetString(PyExc_TypeError, "10-tuple required");
+			return NULL;
+		}
 	}
+
 	if (c_node_id_len != self->nodelen) {
 		PyErr_SetString(PyExc_TypeError, "invalid node");
 		return NULL;
@@ -393,15 +424,15 @@
 	if (self->new_length == self->added_length) {
 		size_t new_added_length =
 		    self->added_length ? self->added_length * 2 : 4096;
-		void *new_added =
-		    PyMem_Realloc(self->added, new_added_length * v1_hdrsize);
+		void *new_added = PyMem_Realloc(self->added, new_added_length *
+		                                                 self->hdrsize);
 		if (!new_added)
 			return PyErr_NoMemory();
 		self->added = new_added;
 		self->added_length = new_added_length;
 	}
 	rev = self->length + self->new_length;
-	data = self->added + v1_hdrsize * self->new_length++;
+	data = self->added + self->hdrsize * self->new_length++;
 	putbe32(offset_flags >> 32, data);
 	putbe32(offset_flags & 0xffffffffU, data + 4);
 	putbe32(comp_len, data + 8);
@@ -411,7 +442,14 @@
 	putbe32(parent_1, data + 24);
 	putbe32(parent_2, data + 28);
 	memcpy(data + 32, c_node_id, c_node_id_len);
+	/* Padding since SHA-1 is only 20 bytes for now */
 	memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
+	if (self->hdrsize != v1_hdrsize) {
+		putbe64(sidedata_offset, data + 64);
+		putbe32(sidedata_comp_len, data + 72);
+		/* Padding for 96-byte alignment */
+		memset(data + 76, 0, self->hdrsize - 76);
+	}
 
 	if (self->ntinitialized)
 		nt_insert(&self->nt, c_node_id, rev);
@@ -420,6 +458,56 @@
 	Py_RETURN_NONE;
 }
 
+/* Replace an existing index entry's sidedata offset and length with new ones.
+   This cannot be used outside of the context of sidedata rewriting,
+   inside the transaction that creates the given revision. */
+static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
+{
+	uint64_t sidedata_offset;
+	int rev;
+	Py_ssize_t sidedata_comp_len;
+	char *data;
+#if LONG_MAX == 0x7fffffffL
+	const char *const sidedata_format = PY23("nKi", "nKi");
+#else
+	const char *const sidedata_format = PY23("nki", "nki");
+#endif
+
+	if (self->hdrsize == v1_hdrsize || self->inlined) {
+		/*
+		 * There is a bug in the transaction handling when going from
+		 * an inline revlog to a separate index and data file. Turn it
+		 * off until it's fixed, since v2 revlogs sometimes get
+		 * rewritten on exchange. See issue6485.
+		 */
+		raise_revlog_error();
+		return NULL;
+	}
+
+	if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
+	                      &sidedata_comp_len))
+		return NULL;
+
+	if (rev < 0 || rev >= index_length(self)) {
+		PyErr_SetString(PyExc_IndexError, "revision outside index");
+		return NULL;
+	}
+	if (rev < self->length) {
+		PyErr_SetString(
+		    PyExc_IndexError,
+		    "cannot rewrite entries outside of this transaction");
+		return NULL;
+	}
+
+	/* Find the newly added node, offset from the "already on-disk" length */
+	data = self->added + self->hdrsize * (rev - self->length);
+	putbe64(sidedata_offset, data + 64);
+	putbe32(sidedata_comp_len, data + 72);
+
+	Py_RETURN_NONE;
+}
+
 static PyObject *index_stats(indexObject *self)
 {
 	PyObject *obj = PyDict_New();
@@ -2563,14 +2651,17 @@
 	const char *data = (const char *)self->buf.buf;
 	Py_ssize_t pos = 0;
 	Py_ssize_t end = self->buf.len;
-	long incr = v1_hdrsize;
+	long incr = self->hdrsize;
 	Py_ssize_t len = 0;
 
-	while (pos + v1_hdrsize <= end && pos >= 0) {
-		uint32_t comp_len;
+	while (pos + self->hdrsize <= end && pos >= 0) {
+		uint32_t comp_len, sidedata_comp_len = 0;
 		/* 3rd element of header is length of compressed inline data */
 		comp_len = getbe32(data + pos + 8);
-		incr = v1_hdrsize + comp_len;
+		if (self->hdrsize == v2_hdrsize) {
+			sidedata_comp_len = getbe32(data + pos + 72);
+		}
+		incr = self->hdrsize + comp_len + sidedata_comp_len;
 		if (offsets)
 			offsets[len] = data + pos;
 		len++;
@@ -2586,11 +2677,13 @@
 	return len;
 }
 
-static int index_init(indexObject *self, PyObject *args)
+static int index_init(indexObject *self, PyObject *args, PyObject *kwargs)
 {
-	PyObject *data_obj, *inlined_obj;
+	PyObject *data_obj, *inlined_obj, *revlogv2;
 	Py_ssize_t size;
 
+	static char *kwlist[] = {"data", "inlined", "revlogv2", NULL};
+
 	/* Initialize before argument-checking to avoid index_dealloc() crash.
 	 */
 	self->added = NULL;
@@ -2606,7 +2699,9 @@
 	self->nodelen = 20;
 	self->nullentry = NULL;
 
-	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
+	revlogv2 = NULL;
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
+	                                 &data_obj, &inlined_obj, &revlogv2))
 		return -1;
 	if (!PyObject_CheckBuffer(data_obj)) {
 		PyErr_SetString(PyExc_TypeError,
@@ -2618,8 +2713,22 @@
 		return -1;
 	}
 
-	self->nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
-	                                -1, -1, -1, -1, nullid, self->nodelen);
+	if (revlogv2 && PyObject_IsTrue(revlogv2)) {
+		self->hdrsize = v2_hdrsize;
+	} else {
+		self->hdrsize = v1_hdrsize;
+	}
+
+	if (self->hdrsize == v1_hdrsize) {
+		self->nullentry =
+		    Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
+		                  -1, -1, -1, nullid, self->nodelen);
+	} else {
+		self->nullentry = Py_BuildValue(
+		    PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0, -1, -1, -1,
+		    -1, nullid, self->nodelen, 0, 0);
+	}
+
 	if (!self->nullentry)
 		return -1;
 	PyObject_GC_UnTrack(self->nullentry);
@@ -2641,11 +2750,11 @@
 			goto bail;
 		self->length = len;
 	} else {
-		if (size % v1_hdrsize) {
+		if (size % self->hdrsize) {
 			PyErr_SetString(PyExc_ValueError, "corrupt index file");
 			goto bail;
 		}
-		self->length = size / v1_hdrsize;
+		self->length = size / self->hdrsize;
 	}
 
 	return 0;
@@ -2730,6 +2839,8 @@
      "compute phases"},
     {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
      "reachableroots"},
+    {"replace_sidedata_info", (PyCFunction)index_replace_sidedata_info,
+     METH_VARARGS, "replace an existing index entry with a new value"},
     {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
      "get head revisions"}, /* Can do filtering since 3.2 */
     {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
@@ -2797,16 +2908,16 @@
 };
 
 /*
- * returns a tuple of the form (index, index, cache) with elements as
+ * returns a tuple of the form (index, cache) with elements as
  * follows:
  *
- * index: an index object that lazily parses RevlogNG records
+ * index: an index object that lazily parses Revlog (v1 or v2) records
  * cache: if data is inlined, a tuple (0, index_file_content), else None
  *        index_file_content could be a string, or a buffer
  *
  * added complications are for backwards compatibility
  */
-PyObject *parse_index2(PyObject *self, PyObject *args)
+PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs)
 {
 	PyObject *cache = NULL;
 	indexObject *idx;
@@ -2816,7 +2927,7 @@
 	if (idx == NULL)
 		goto bail;
 
-	ret = index_init(idx, args);
+	ret = index_init(idx, args, kwargs);
 	if (ret == -1)
 		goto bail;
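
From Python, the widened entry point keeps its old positional form and gains
one optional keyword. A sketch of the calling convention only; real callers go
through the revlog layer, the path is illustrative, and this assumes a
non-inline index file:

    from mercurial.cext import parsers

    with open('.hg/store/00changelog.i', 'rb') as fh:
        data = fh.read()

    # v1 (RevlogNG): 64-byte entries, one 8-tuple per revision.
    index, cache = parsers.parse_index2(data, False)

    # v2: 96-byte entries, one 10-tuple per revision whose last two
    # items are the sidedata offset and compressed sidedata length.
    # index, cache = parsers.parse_index2(data, False, revlogv2=True)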
 
--- a/mercurial/changegroup.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/changegroup.py	Thu Mar 18 18:24:59 2021 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+import collections
 import os
 import struct
 import weakref
@@ -32,6 +33,7 @@
 )
 
 from .interfaces import repository
+from .revlogutils import sidedata as sidedatamod
 
 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
@@ -202,7 +204,9 @@
         header = self.deltaheader.unpack(headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
-        return (node, p1, p2, cs, deltabase, delta, flags)
+        # cg4 forward-compat
+        sidedata = {}
+        return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
 
     def getchunks(self):
         """returns all the chunks contains in the bundle
@@ -249,7 +253,7 @@
                     pos = next
             yield closechunk()
 
-    def _unpackmanifests(self, repo, revmap, trp, prog):
+    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
         self.callback = prog.increment
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -257,7 +261,8 @@
         # be empty during the pull
         self.manifestheader()
         deltas = self.deltaiter()
-        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
+        storage = repo.manifestlog.getstorage(b'')
+        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
         prog.complete()
         self.callback = None
 
@@ -269,6 +274,7 @@
         url,
         targetphase=phases.draft,
         expectedtotal=None,
+        sidedata_categories=None,
     ):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
@@ -279,9 +285,23 @@
         - more heads than before: 1+added heads (2..n)
         - fewer heads than before: -1-removed heads (-2..-n)
         - number of heads stays the same: 1
+
+        `sidedata_categories` is an optional set of the remote's wanted
+        sidedata categories.
         """
         repo = repo.unfiltered()
 
+        # Only useful if we're adding sidedata categories. If both peers have
+        # the same categories, then we simply don't do anything.
+        if self.version == b'04' and srctype == b'pull':
+            sidedata_helpers = get_sidedata_helpers(
+                repo,
+                sidedata_categories or set(),
+                pull=True,
+            )
+        else:
+            sidedata_helpers = None
+
         def csmap(x):
             repo.ui.debug(b"add changeset %s\n" % short(x))
             return len(cl)
@@ -316,14 +336,16 @@
             self.callback = progress.increment
 
             efilesset = set()
-            cgnodes = []
+            duprevs = []
 
-            def ondupchangelog(cl, node):
-                if cl.rev(node) < clstart:
-                    cgnodes.append(node)
+            def ondupchangelog(cl, rev):
+                if rev < clstart:
+                    duprevs.append(rev)
 
-            def onchangelog(cl, node):
-                efilesset.update(cl.readfiles(node))
+            def onchangelog(cl, rev):
+                ctx = cl.changelogrevision(rev)
+                efilesset.update(ctx.files)
+                repo.register_changeset(rev, ctx)
 
             self.changelogheader()
             deltas = self.deltaiter()
@@ -331,6 +353,7 @@
                 deltas,
                 csmap,
                 trp,
+                alwayscache=True,
                 addrevisioncb=onchangelog,
                 duplicaterevisioncb=ondupchangelog,
             ):
@@ -348,6 +371,13 @@
             efilesset = None
             self.callback = None
 
+            # Keep track of the (non-changelog) revlogs we've updated and their
+            # range of new revisions for sidedata rewrite.
+            # TODO do something more efficient than keeping the reference to
+            # the revlogs, especially memory-wise.
+            touched_manifests = {}
+            touched_filelogs = {}
+
             # pull off the manifest group
             repo.ui.status(_(b"adding manifests\n"))
             # We know that we'll never have more manifests than we had
@@ -355,7 +385,24 @@
             progress = repo.ui.makeprogress(
                 _(b'manifests'), unit=_(b'chunks'), total=changesets
             )
-            self._unpackmanifests(repo, revmap, trp, progress)
+            on_manifest_rev = None
+            if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+
+                def on_manifest_rev(manifest, rev):
+                    range = touched_manifests.get(manifest)
+                    if not range:
+                        touched_manifests[manifest] = (rev, rev)
+                    else:
+                        assert rev == range[1] + 1
+                        touched_manifests[manifest] = (range[0], rev)
+
+            self._unpackmanifests(
+                repo,
+                revmap,
+                trp,
+                progress,
+                addrevisioncb=on_manifest_rev,
+            )
 
             needfiles = {}
             if repo.ui.configbool(b'server', b'validate'):
@@ -369,12 +416,37 @@
                     for f, n in pycompat.iteritems(mfest):
                         needfiles.setdefault(f, set()).add(n)
 
+            on_filelog_rev = None
+            if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+
+                def on_filelog_rev(filelog, rev):
+                    range = touched_filelogs.get(filelog)
+                    if not range:
+                        touched_filelogs[filelog] = (rev, rev)
+                    else:
+                        assert rev == range[1] + 1
+                        touched_filelogs[filelog] = (range[0], rev)
+
             # process the files
             repo.ui.status(_(b"adding file changes\n"))
             newrevs, newfiles = _addchangegroupfiles(
-                repo, self, revmap, trp, efiles, needfiles
+                repo,
+                self,
+                revmap,
+                trp,
+                efiles,
+                needfiles,
+                addrevisioncb=on_filelog_rev,
             )
 
+            if sidedata_helpers:
+                if b'changelog' in sidedata_helpers[1]:
+                    cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
+                for mf, (startrev, endrev) in touched_manifests.items():
+                    mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+                for fl, (startrev, endrev) in touched_filelogs.items():
+                    fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+
             # making sure the value exists
             tr.changes.setdefault(b'changegroup-count-changesets', 0)
             tr.changes.setdefault(b'changegroup-count-revisions', 0)
@@ -445,8 +517,12 @@
             if added:
                 phases.registernew(repo, tr, targetphase, added)
             if phaseall is not None:
-                phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added)
-                cgnodes = []
+                if duprevs:
+                    duprevs.extend(added)
+                else:
+                    duprevs = added
+                phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
+                duprevs = []
 
             if changesets > 0:
 
@@ -534,17 +610,44 @@
         node, p1, p2, deltabase, cs, flags = headertuple
         return node, p1, p2, deltabase, cs, flags
 
-    def _unpackmanifests(self, repo, revmap, trp, prog):
-        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
+    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+        super(cg3unpacker, self)._unpackmanifests(
+            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+        )
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata[b"filename"]
             repo.ui.debug(b"adding %s revisions\n" % d)
             deltas = self.deltaiter()
-            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
+            if not repo.manifestlog.getstorage(d).addgroup(
+                deltas, revmap, trp, addrevisioncb=addrevisioncb
+            ):
                 raise error.Abort(_(b"received dir revlog group is empty"))
 
 
+class cg4unpacker(cg3unpacker):
+    """Unpacker for cg4 streams.
+
+    cg4 streams add support for exchanging sidedata.
+    """
+
+    version = b'04'
+
+    def deltachunk(self, prevnode):
+        res = super(cg4unpacker, self).deltachunk(prevnode)
+        if not res:
+            return res
+
+        (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+
+        sidedata_raw = getchunk(self._stream)
+        sidedata = {}
+        if len(sidedata_raw) > 0:
+            sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
+
+        return node, p1, p2, cs, deltabase, delta, flags, sidedata
+
+
 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
@@ -559,7 +662,7 @@
         return readexactly(self._fh, n)
 
 
-def _revisiondeltatochunks(delta, headerfn):
+def _revisiondeltatochunks(repo, delta, headerfn):
     """Serialize a revisiondelta to changegroup chunks."""
 
     # The captured revision delta may be encoded as a delta against
@@ -585,6 +688,13 @@
         yield prefix
     yield data
 
+    sidedata = delta.sidedata
+    if sidedata is not None:
+        # Need a separate chunk for sidedata to be able to differentiate
+        # "raw delta" length and sidedata length
+        yield chunkheader(len(sidedata))
+        yield sidedata
+
 
 def _sortnodesellipsis(store, nodes, cl, lookup):
     """Sort nodes for changegroup generation."""
@@ -678,7 +788,7 @@
                 # We failed to resolve a parent for this node, so
                 # we crash the changegroup construction.
                 raise error.Abort(
-                    b'unable to resolve parent while packing %r %r'
+                    b"unable to resolve parent while packing '%s' %r"
                     b' for changeset %r' % (store.indexfile, rev, clrev)
                 )
 
@@ -709,6 +819,7 @@
     clrevtolocalrev=None,
     fullclnodes=None,
     precomputedellipsis=None,
+    sidedata_helpers=None,
 ):
     """Calculate deltas for a set of revisions.
 
@@ -716,6 +827,8 @@
 
     If topic is not None, progress detail will be generated using this
     topic name (e.g. changesets, manifests, etc).
+
+    See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
     """
     if not nodes:
         return
@@ -814,6 +927,7 @@
         revisiondata=True,
         assumehaveparentrevisions=not ellipses,
         deltamode=deltamode,
+        sidedata_helpers=sidedata_helpers,
     )
 
     for i, revision in enumerate(revisions):
@@ -854,6 +968,7 @@
         shallow=False,
         ellipsisroots=None,
         fullnodes=None,
+        remote_sidedata=None,
     ):
         """Given a source repo, construct a bundler.
 
@@ -886,6 +1001,8 @@
         nodes. We store this rather than the set of nodes that should be
         ellipsis because for very large histories we expect this to be
         significantly smaller.
+
+        remote_sidedata is the set of sidedata categories wanted by the remote.
         """
         assert oldmatcher
         assert matcher
@@ -902,6 +1019,9 @@
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
+        if remote_sidedata is None:
+            remote_sidedata = set()
+        self._remote_sidedata = remote_sidedata
         self._isshallow = shallow
         self._fullclnodes = fullnodes
 
@@ -928,11 +1048,26 @@
         self._verbosenote(_(b'uncompressed size of bundle content:\n'))
         size = 0
 
+        sidedata_helpers = None
+        if self.version == b'04':
+            remote_sidedata = self._remote_sidedata
+            if source == b'strip':
+                # We're our own remote when stripping, get the no-op helpers
+                # TODO a better approach would be for the strip bundle to
+                # correctly advertise its sidedata categories directly.
+                remote_sidedata = repo._wanted_sidedata
+            sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+
         clstate, deltas = self._generatechangelog(
-            cl, clnodes, generate=changelog
+            cl,
+            clnodes,
+            generate=changelog,
+            sidedata_helpers=sidedata_helpers,
         )
         for delta in deltas:
-            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
+            for chunk in _revisiondeltatochunks(
+                self._repo, delta, self._builddeltaheader
+            ):
                 size += len(chunk)
                 yield chunk
 
@@ -977,17 +1112,20 @@
             fnodes,
             source,
             clstate[b'clrevtomanifestrev'],
+            sidedata_helpers=sidedata_helpers,
         )
 
         for tree, deltas in it:
             if tree:
-                assert self.version == b'03'
+                assert self.version in (b'03', b'04')
                 chunk = _fileheader(tree)
                 size += len(chunk)
                 yield chunk
 
             for delta in deltas:
-                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                chunks = _revisiondeltatochunks(
+                    self._repo, delta, self._builddeltaheader
+                )
                 for chunk in chunks:
                     size += len(chunk)
                     yield chunk
@@ -1002,7 +1140,7 @@
         mfdicts = None
         if self._ellipses and self._isshallow:
             mfdicts = [
-                (self._repo.manifestlog[n].read(), lr)
+                (repo.manifestlog[n].read(), lr)
                 for (n, lr) in pycompat.iteritems(manifests)
             ]
 
@@ -1017,6 +1155,7 @@
             fastpathlinkrev,
             fnodes,
             clrevs,
+            sidedata_helpers=sidedata_helpers,
         )
 
         for path, deltas in it:
@@ -1025,7 +1164,9 @@
             yield h
 
             for delta in deltas:
-                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                chunks = _revisiondeltatochunks(
+                    self._repo, delta, self._builddeltaheader
+                )
                 for chunk in chunks:
                     size += len(chunk)
                     yield chunk
@@ -1041,7 +1182,9 @@
         if clnodes:
             repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
 
-    def _generatechangelog(self, cl, nodes, generate=True):
+    def _generatechangelog(
+        self, cl, nodes, generate=True, sidedata_helpers=None
+    ):
         """Generate data for changelog chunks.
 
         Returns a 2-tuple of a dict containing state and an iterable of
@@ -1050,6 +1193,8 @@
 
         if generate is False, the state will be fully populated and no chunk
         stream will be yielded
+
+        See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
         """
         clrevorder = {}
         manifests = {}
@@ -1133,6 +1278,7 @@
             clrevtolocalrev={},
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
+            sidedata_helpers=sidedata_helpers,
         )
 
         return state, gen
@@ -1146,11 +1292,14 @@
         fnodes,
         source,
         clrevtolocalrev,
+        sidedata_helpers=None,
     ):
         """Returns an iterator of changegroup chunks containing manifests.
 
         `source` is unused here, but is used by extensions like remotefilelog to
         change what is sent based in pulls vs pushes, etc.
+
+        See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
         """
         repo = self._repo
         mfl = repo.manifestlog
@@ -1240,6 +1389,7 @@
                 clrevtolocalrev=clrevtolocalrev,
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
+                sidedata_helpers=sidedata_helpers,
             )
 
             if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1278,6 +1428,7 @@
         fastpathlinkrev,
         fnodes,
         clrevs,
+        sidedata_helpers=None,
     ):
         changedfiles = [
             f
@@ -1372,6 +1523,7 @@
                 clrevtolocalrev=clrevtolocalrev,
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
+                sidedata_helpers=sidedata_helpers,
             )
 
             yield fname, deltas
@@ -1388,6 +1540,7 @@
     shallow=False,
     ellipsisroots=None,
     fullnodes=None,
+    remote_sidedata=None,
 ):
     builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
         d.node, d.p1node, d.p2node, d.linknode
@@ -1418,6 +1571,7 @@
     shallow=False,
     ellipsisroots=None,
     fullnodes=None,
+    remote_sidedata=None,
 ):
     builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
         d.node, d.p1node, d.p2node, d.basenode, d.linknode
@@ -1447,6 +1601,7 @@
     shallow=False,
     ellipsisroots=None,
     fullnodes=None,
+    remote_sidedata=None,
 ):
     builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
         d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
@@ -1467,12 +1622,47 @@
     )
 
 
+def _makecg4packer(
+    repo,
+    oldmatcher,
+    matcher,
+    bundlecaps,
+    ellipses=False,
+    shallow=False,
+    ellipsisroots=None,
+    fullnodes=None,
+    remote_sidedata=None,
+):
+    # Same header func as cg3. Sidedata is in a separate chunk from the
+    # delta to differentiate "raw delta" and sidedata.
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
+    )
+
+    return cgpacker(
+        repo,
+        oldmatcher,
+        matcher,
+        b'04',
+        builddeltaheader=builddeltaheader,
+        manifestsend=closechunk(),
+        bundlecaps=bundlecaps,
+        ellipses=ellipses,
+        shallow=shallow,
+        ellipsisroots=ellipsisroots,
+        fullnodes=fullnodes,
+        remote_sidedata=remote_sidedata,
+    )
+
+
 _packermap = {
     b'01': (_makecg1packer, cg1unpacker),
     # cg2 adds support for exchanging generaldelta
     b'02': (_makecg2packer, cg2unpacker),
     # cg3 adds support for exchanging revlog flags and treemanifests
     b'03': (_makecg3packer, cg3unpacker),
+    # cg4 adds support for exchanging sidedata
+    b'04': (_makecg4packer, cg4unpacker),
 }
 
 
@@ -1492,11 +1682,9 @@
         #
         # (or even to push subset of history)
         needv03 = True
-    if b'exp-sidedata-flag' in repo.requirements:
-        needv03 = True
-        # don't attempt to use 01/02 until we do sidedata cleaning
-        versions.discard(b'01')
-        versions.discard(b'02')
+    has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
+    if not has_revlogv2:
+        versions.discard(b'04')
     if not needv03:
         versions.discard(b'03')
     return versions
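
The net effect of the new gating is easy to state: cg4 is only offered when
the repository itself uses revlog v2. A few illustrative lines (the real
requirement string lives in mercurial.requirements as REVLOGV2_REQUIREMENT;
the spelling below is a placeholder):

    versions = {b'01', b'02', b'03', b'04'}
    repo_requirements = {b'generaldelta', b'revlogv1'}  # illustrative

    REVLOGV2_REQUIREMENT = b'exp-revlogv2'  # placeholder spelling

    if REVLOGV2_REQUIREMENT not in repo_requirements:
        versions.discard(b'04')  # cg4 needs revlog v2 sidedata storage

    assert versions == {b'01', b'02', b'03'}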
@@ -1543,7 +1731,7 @@
     # will support. For example, all hg versions that support generaldelta also
     # support changegroup 02.
     versions = supportedoutgoingversions(repo)
-    if b'generaldelta' in repo.requirements:
+    if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
         versions.discard(b'01')
     assert versions
     return min(versions)
@@ -1559,6 +1747,7 @@
     shallow=False,
     ellipsisroots=None,
     fullnodes=None,
+    remote_sidedata=None,
 ):
     assert version in supportedoutgoingversions(repo)
 
@@ -1595,6 +1784,7 @@
         shallow=shallow,
         ellipsisroots=ellipsisroots,
         fullnodes=fullnodes,
+        remote_sidedata=remote_sidedata,
     )
 
 
@@ -1638,8 +1828,15 @@
     fastpath=False,
     bundlecaps=None,
     matcher=None,
+    remote_sidedata=None,
 ):
-    bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
+    bundler = getbundler(
+        version,
+        repo,
+        bundlecaps=bundlecaps,
+        matcher=matcher,
+        remote_sidedata=remote_sidedata,
+    )
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
@@ -1658,7 +1855,15 @@
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
 
 
-def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
+def _addchangegroupfiles(
+    repo,
+    source,
+    revmap,
+    trp,
+    expectedfiles,
+    needfiles,
+    addrevisioncb=None,
+):
     revisions = 0
     files = 0
     progress = repo.ui.makeprogress(
@@ -1673,7 +1878,13 @@
         o = len(fl)
         try:
             deltas = source.deltaiter()
-            if not fl.addgroup(deltas, revmap, trp):
+            added = fl.addgroup(
+                deltas,
+                revmap,
+                trp,
+                addrevisioncb=addrevisioncb,
+            )
+            if not added:
                 raise error.Abort(_(b"received file revlog group is empty"))
         except error.CensoredBaseError as e:
             raise error.Abort(_(b"received delta base is censored: %s") % e)
@@ -1702,3 +1913,25 @@
                 )
 
     return revisions, files
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+    # Computers for computing sidedata on-the-fly
+    sd_computers = collections.defaultdict(list)
+    # Computers for categories to remove from sidedata
+    sd_removers = collections.defaultdict(list)
+
+    to_generate = remote_sd_categories - repo._wanted_sidedata
+    to_remove = repo._wanted_sidedata - remote_sd_categories
+    if pull:
+        to_generate, to_remove = to_remove, to_generate
+
+    for revlog_kind, computers in repo._sidedata_computers.items():
+        for category, computer in computers.items():
+            if category in to_generate:
+                sd_computers[revlog_kind].append(computer)
+            if category in to_remove:
+                sd_removers[revlog_kind].append(computer)
+
+    sidedata_helpers = (repo, sd_computers, sd_removers)
+    return sidedata_helpers
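
The category arithmetic in get_sidedata_helpers() is symmetric between the two
directions; a worked example with made-up category names:

    local_wanted = {b'copies'}                 # repo._wanted_sidedata
    remote_wanted = {b'copies', b'filesize'}   # from the capability

    # Push/serve: adapt outgoing data to what the remote wants.
    to_generate = remote_wanted - local_wanted   # {b'filesize'}
    to_remove = local_wanted - remote_wanted     # set()

    # Pull: the roles flip, because the local repo consumes the data.
    to_generate, to_remove = to_remove, to_generate

    assert to_generate == set()
    assert to_remove == {b'filesize'}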
--- a/mercurial/changelog.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/changelog.py	Thu Mar 18 18:24:59 2021 -0400
@@ -191,7 +191,7 @@
     # Extensions might modify _defaultextra, so let the constructor below pass
     # it in
     extra = attr.ib()
-    manifest = attr.ib(default=nullid)
+    manifest = attr.ib()
     user = attr.ib(default=b'')
     date = attr.ib(default=(0, 0))
     files = attr.ib(default=attr.Factory(list))
@@ -200,6 +200,7 @@
     p1copies = attr.ib(default=None)
     p2copies = attr.ib(default=None)
     description = attr.ib(default=b'')
+    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
 
 
 class changelogrevision(object):
@@ -218,9 +219,9 @@
         '_changes',
     )
 
-    def __new__(cls, text, sidedata, cpsd):
+    def __new__(cls, cl, text, sidedata, cpsd):
         if not text:
-            return _changelogrevision(extra=_defaultextra)
+            return _changelogrevision(extra=_defaultextra, manifest=nullid)
 
         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
@@ -372,9 +373,14 @@
     def description(self):
         return encoding.tolocal(self._text[self._offsets[3] + 2 :])
 
+    @property
+    def branchinfo(self):
+        extra = self.extra
+        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+
 
 class changelog(revlog.revlog):
-    def __init__(self, opener, trypending=False):
+    def __init__(self, opener, trypending=False, concurrencychecker=None):
         """Load a changelog revlog using an opener.
 
         If ``trypending`` is true, we attempt to load the index from a
@@ -383,6 +389,9 @@
         revision) data for a transaction that hasn't been finalized yet.
         It exists in a separate file to facilitate readers (such as
         hooks processes) accessing data before a transaction is finalized.
+
+        ``concurrencychecker`` will be passed to the revlog init function, see
+        the documentation there.
         """
         if trypending and opener.exists(b'00changelog.i.a'):
             indexfile = b'00changelog.i.a'
@@ -398,6 +407,7 @@
             checkambig=True,
             mmaplargeindex=True,
             persistentnodemap=opener.options.get(b'persistent-nodemap', False),
+            concurrencychecker=concurrencychecker,
         )
 
         if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
@@ -418,6 +428,7 @@
         self._filteredrevs = frozenset()
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
+        self.revlog_kind = b'changelog'
 
     @property
     def filteredrevs(self):
@@ -497,7 +508,7 @@
         if not self._delayed:
             revlog.revlog._enforceinlinesize(self, tr, fp)
 
-    def read(self, node):
+    def read(self, nodeorrev):
         """Obtain data from a parsed changelog revision.
 
         Returns a 6-tuple of:
@@ -513,9 +524,9 @@
         ``changelogrevision`` instead, as it is faster for partial object
         access.
         """
-        d, s = self._revisiondata(node)
+        d, s = self._revisiondata(nodeorrev)
         c = changelogrevision(
-            d, s, self._copiesstorage == b'changeset-sidedata'
+            self, d, s, self._copiesstorage == b'changeset-sidedata'
         )
         return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
 
@@ -523,14 +534,14 @@
         """Obtain a ``changelogrevision`` for a node or revision."""
         text, sidedata = self._revisiondata(nodeorrev)
         return changelogrevision(
-            text, sidedata, self._copiesstorage == b'changeset-sidedata'
+            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
         )
 
-    def readfiles(self, node):
+    def readfiles(self, nodeorrev):
         """
         short version of read that only returns the files modified by the cset
         """
-        text = self.revision(node)
+        text = self.revision(nodeorrev)
         if not text:
             return []
         last = text.index(b"\n\n")
@@ -592,21 +603,21 @@
             parseddate = b"%s %s" % (parseddate, extra)
         l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
         text = b"\n".join(l)
-        return self.addrevision(
+        rev = self.addrevision(
             text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
         )
+        return self.node(rev)
 
     def branchinfo(self, rev):
         """return the branch name and open/close state of a revision
 
         This function exists because creating a changectx object
         just to access this is costly."""
-        extra = self.changelogrevision(rev).extra
-        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
+        return self.changelogrevision(rev).branchinfo
 
-    def _nodeduplicatecallback(self, transaction, node):
+    def _nodeduplicatecallback(self, transaction, rev):
         # keep track of revisions that got "re-added", e.g. unbundle of known rev.
         #
         # We track them in a list to preserve their order from the source bundle
         duplicates = transaction.changes.setdefault(b'revduplicates', [])
-        duplicates.append(self.rev(node))
+        duplicates.append(rev)
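
The switch from node to rev in these callbacks lets a consumer parse each
changelog entry once and read several fields from the same object. A sketch of
the new callback shape, with cl standing for a changelog instance:

    seen_files = set()

    def onchangelog(cl, rev):
        # changelogrevision() parses the entry once; .files and the new
        # .branchinfo property come from that single parsed object.
        ctx = cl.changelogrevision(rev)
        seen_files.update(ctx.files)
        branch, closed = ctx.branchinfo  # (branch name, is-close)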
--- a/mercurial/cmdutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/cmdutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -2967,20 +2967,6 @@
 
         # Reroute the working copy parent to the new changeset
         repo.setparents(newid, nullid)
-        mapping = {old.node(): (newid,)}
-        obsmetadata = None
-        if opts.get(b'note'):
-            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
-        backup = ui.configbool(b'rewrite', b'backup-bundle')
-        scmutil.cleanupnodes(
-            repo,
-            mapping,
-            b'amend',
-            metadata=obsmetadata,
-            fixphase=True,
-            targetphase=commitphase,
-            backup=backup,
-        )
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -3003,6 +2989,21 @@
         for f in removedfiles:
             dirstate.drop(f)
 
+        mapping = {old.node(): (newid,)}
+        obsmetadata = None
+        if opts.get(b'note'):
+            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
+        backup = ui.configbool(b'rewrite', b'backup-bundle')
+        scmutil.cleanupnodes(
+            repo,
+            mapping,
+            b'amend',
+            metadata=obsmetadata,
+            fixphase=True,
+            targetphase=commitphase,
+            backup=backup,
+        )
+
     return newid
 
 
--- a/mercurial/commands.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/commands.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1648,6 +1648,14 @@
     if complevel is not None:
         compopts[b'level'] = complevel
 
+    compthreads = ui.configint(
+        b'experimental', b'bundlecompthreads.' + bundlespec.compression
+    )
+    if compthreads is None:
+        compthreads = ui.configint(b'experimental', b'bundlecompthreads')
+    if compthreads is not None:
+        compopts[b'threads'] = compthreads
+
     # Bundling of obsmarker and phases is optional as not all clients
     # support the necessary features.
     cfg = ui.configbool
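
The lookup above tries an engine-specific key first and falls back to the
generic one. For example, in an hgrc (engine names follow the bundlespec
compression name; the values are illustrative):

    [experimental]
    bundlecompthreads = 2
    bundlecompthreads.zstd = 4   # per-engine override, wins for zstd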
@@ -2550,7 +2558,7 @@
     if change:
         repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
         ctx2 = scmutil.revsingle(repo, change, None)
-        ctx1 = ctx2.p1()
+        ctx1 = logcmdutil.diff_parent(ctx2)
     elif from_rev or to_rev:
         repo = scmutil.unhidehashlikerevs(
             repo, [from_rev] + [to_rev], b'nowarn'
@@ -3821,132 +3829,138 @@
     output = []
     revs = []
 
-    if source:
-        source, branches = hg.parseurl(ui.expandpath(source))
-        peer = hg.peer(repo or ui, opts, source)  # only pass ui when no repo
-        repo = peer.local()
-        revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
-
-    fm = ui.formatter(b'identify', opts)
-    fm.startitem()
-
-    if not repo:
-        if num or branch or tags:
-            raise error.InputError(
-                _(b"can't query remote revision number, branch, or tags")
-            )
-        if not rev and revs:
-            rev = revs[0]
-        if not rev:
-            rev = b"tip"
-
-        remoterev = peer.lookup(rev)
-        hexrev = fm.hexfunc(remoterev)
-        if default or id:
-            output = [hexrev]
-        fm.data(id=hexrev)
-
-        @util.cachefunc
-        def getbms():
-            bms = []
-
-            if b'bookmarks' in peer.listkeys(b'namespaces'):
-                hexremoterev = hex(remoterev)
-                bms = [
-                    bm
-                    for bm, bmr in pycompat.iteritems(
-                        peer.listkeys(b'bookmarks')
+    peer = None
+    try:
+        if source:
+            source, branches = hg.parseurl(ui.expandpath(source))
+            # only pass ui when no repo
+            peer = hg.peer(repo or ui, opts, source)
+            repo = peer.local()
+            revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
+
+        fm = ui.formatter(b'identify', opts)
+        fm.startitem()
+
+        if not repo:
+            if num or branch or tags:
+                raise error.InputError(
+                    _(b"can't query remote revision number, branch, or tags")
+                )
+            if not rev and revs:
+                rev = revs[0]
+            if not rev:
+                rev = b"tip"
+
+            remoterev = peer.lookup(rev)
+            hexrev = fm.hexfunc(remoterev)
+            if default or id:
+                output = [hexrev]
+            fm.data(id=hexrev)
+
+            @util.cachefunc
+            def getbms():
+                bms = []
+
+                if b'bookmarks' in peer.listkeys(b'namespaces'):
+                    hexremoterev = hex(remoterev)
+                    bms = [
+                        bm
+                        for bm, bmr in pycompat.iteritems(
+                            peer.listkeys(b'bookmarks')
+                        )
+                        if bmr == hexremoterev
+                    ]
+
+                return sorted(bms)
+
+            if fm.isplain():
+                if bookmarks:
+                    output.extend(getbms())
+                elif default and not ui.quiet:
+                    # multiple bookmarks for a single parent separated by '/'
+                    bm = b'/'.join(getbms())
+                    if bm:
+                        output.append(bm)
+            else:
+                fm.data(node=hex(remoterev))
+                if bookmarks or b'bookmarks' in fm.datahint():
+                    fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
+        else:
+            if rev:
+                repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
+            ctx = scmutil.revsingle(repo, rev, None)
+
+            if ctx.rev() is None:
+                ctx = repo[None]
+                parents = ctx.parents()
+                taglist = []
+                for p in parents:
+                    taglist.extend(p.tags())
+
+                dirty = b""
+                if ctx.dirty(missing=True, merge=False, branch=False):
+                    dirty = b'+'
+                fm.data(dirty=dirty)
+
+                hexoutput = [fm.hexfunc(p.node()) for p in parents]
+                if default or id:
+                    output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
+                fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
+
+                if num:
+                    numoutput = [b"%d" % p.rev() for p in parents]
+                    output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
+
+                fm.data(
+                    parents=fm.formatlist(
+                        [fm.hexfunc(p.node()) for p in parents], name=b'node'
                     )
-                    if bmr == hexremoterev
-                ]
-
-            return sorted(bms)
-
-        if fm.isplain():
-            if bookmarks:
-                output.extend(getbms())
-            elif default and not ui.quiet:
+                )
+            else:
+                hexoutput = fm.hexfunc(ctx.node())
+                if default or id:
+                    output = [hexoutput]
+                fm.data(id=hexoutput)
+
+                if num:
+                    output.append(pycompat.bytestr(ctx.rev()))
+                taglist = ctx.tags()
+
+            if default and not ui.quiet:
+                b = ctx.branch()
+                if b != b'default':
+                    output.append(b"(%s)" % b)
+
+                # multiple tags for a single parent separated by '/'
+                t = b'/'.join(taglist)
+                if t:
+                    output.append(t)
+
                 # multiple bookmarks for a single parent separated by '/'
-                bm = b'/'.join(getbms())
+                bm = b'/'.join(ctx.bookmarks())
                 if bm:
                     output.append(bm)
-        else:
-            fm.data(node=hex(remoterev))
-            if bookmarks or b'bookmarks' in fm.datahint():
-                fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
-    else:
-        if rev:
-            repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
-        ctx = scmutil.revsingle(repo, rev, None)
-
-        if ctx.rev() is None:
-            ctx = repo[None]
-            parents = ctx.parents()
-            taglist = []
-            for p in parents:
-                taglist.extend(p.tags())
-
-            dirty = b""
-            if ctx.dirty(missing=True, merge=False, branch=False):
-                dirty = b'+'
-            fm.data(dirty=dirty)
-
-            hexoutput = [fm.hexfunc(p.node()) for p in parents]
-            if default or id:
-                output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
-            fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
-
-            if num:
-                numoutput = [b"%d" % p.rev() for p in parents]
-                output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
-
-            fm.data(
-                parents=fm.formatlist(
-                    [fm.hexfunc(p.node()) for p in parents], name=b'node'
-                )
-            )
-        else:
-            hexoutput = fm.hexfunc(ctx.node())
-            if default or id:
-                output = [hexoutput]
-            fm.data(id=hexoutput)
-
-            if num:
-                output.append(pycompat.bytestr(ctx.rev()))
-            taglist = ctx.tags()
-
-        if default and not ui.quiet:
-            b = ctx.branch()
-            if b != b'default':
-                output.append(b"(%s)" % b)
-
-            # multiple tags for a single parent separated by '/'
-            t = b'/'.join(taglist)
-            if t:
-                output.append(t)
-
-            # multiple bookmarks for a single parent separated by '/'
-            bm = b'/'.join(ctx.bookmarks())
-            if bm:
-                output.append(bm)
-        else:
-            if branch:
-                output.append(ctx.branch())
-
-            if tags:
-                output.extend(taglist)
-
-            if bookmarks:
-                output.extend(ctx.bookmarks())
-
-        fm.data(node=ctx.hex())
-        fm.data(branch=ctx.branch())
-        fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
-        fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
-        fm.context(ctx=ctx)
-
-    fm.plain(b"%s\n" % b' '.join(output))
-    fm.end()
+            else:
+                if branch:
+                    output.append(ctx.branch())
+
+                if tags:
+                    output.extend(taglist)
+
+                if bookmarks:
+                    output.extend(ctx.bookmarks())
+
+            fm.data(node=ctx.hex())
+            fm.data(branch=ctx.branch())
+            fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
+            fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
+            fm.context(ctx=ctx)
+
+        fm.plain(b"%s\n" % b' '.join(output))
+        fm.end()
+    finally:
+        if peer:
+            peer.close()
 
 
 @command(
@@ -4292,12 +4306,15 @@
             ui.expandpath(source), opts.get(b'branch')
         )
         other = hg.peer(repo, opts, source)
-        if b'bookmarks' not in other.listkeys(b'namespaces'):
-            ui.warn(_(b"remote doesn't support bookmarks\n"))
-            return 0
-        ui.pager(b'incoming')
-        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
-        return bookmarks.incoming(ui, repo, other)
+        try:
+            if b'bookmarks' not in other.listkeys(b'namespaces'):
+                ui.warn(_(b"remote doesn't support bookmarks\n"))
+                return 0
+            ui.pager(b'incoming')
+            ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+            return bookmarks.incoming(ui, repo, other)
+        finally:
+            other.close()
 
     repo._subtoppath = ui.expandpath(source)
     try:
@@ -4328,7 +4345,8 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    hg.peer(ui, opts, ui.expandpath(dest), create=True)
+    peer = hg.peer(ui, opts, ui.expandpath(dest), create=True)
+    peer.close()
 
 
 @command(
@@ -4937,7 +4955,7 @@
     """
     # hg._outgoing() needs to re-resolve the path in order to handle #branch
     # style URLs, so don't overwrite dest.
-    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
+    path = ui.getpath(dest, default=(b'default-push', b'default'))
     if not path:
         raise error.ConfigError(
             _(b'default repository not configured!'),
@@ -4964,12 +4982,15 @@
     if opts.get(b'bookmarks'):
         dest = path.pushloc or path.loc
         other = hg.peer(repo, opts, dest)
-        if b'bookmarks' not in other.listkeys(b'namespaces'):
-            ui.warn(_(b"remote doesn't support bookmarks\n"))
-            return 0
-        ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
-        ui.pager(b'outgoing')
-        return bookmarks.outgoing(ui, repo, other)
+        try:
+            if b'bookmarks' not in other.listkeys(b'namespaces'):
+                ui.warn(_(b"remote doesn't support bookmarks\n"))
+                return 0
+            ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
+            ui.pager(b'outgoing')
+            return bookmarks.outgoing(ui, repo, other)
+        finally:
+            other.close()
 
     repo._subtoppath = path.pushloc or path.loc
     try:
@@ -5244,9 +5265,11 @@
     :optupdate: updating working directory is needed or not
     :checkout: update destination revision (or None to default destination)
     :brev: a name, which might be a bookmark to be activated after updating
+
+    Return True if the update raised any conflict, False otherwise.
     """
     if modheads == 0:
-        return
+        return False
     if optupdate:
         try:
             return hg.updatetotally(ui, repo, checkout, brev)
@@ -5268,6 +5291,7 @@
             ui.status(_(b"(run 'hg heads' to see heads)\n"))
     elif not ui.configbool(b'commands', b'update.requiredest'):
         ui.status(_(b"(run 'hg update' to get a working copy)\n"))
+    return False
 
 
 @command(
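`postincoming` now signals merge conflicts through its boolean return value. A minimal sketch of the calling convention this establishes (the `run_pull_like` wrapper is hypothetical, not part of this patch):

def run_pull_like(ui, repo, modheads, update, checkout, brev):
    # postincoming() returns True when the working-copy update hit a
    # conflict, and False otherwise (including when modheads == 0)
    update_conflict = postincoming(ui, repo, modheads, update, checkout, brev)
    # pull and unbundle below map a conflicted update to exit code 1
    return 1 if update_conflict else 0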
@@ -5308,11 +5332,11 @@
         ),
     ]
     + remoteopts,
-    _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
+    _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]...'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
     helpbasic=True,
 )
-def pull(ui, repo, source=b"default", **opts):
+def pull(ui, repo, *sources, **opts):
     """pull changes from the specified source
 
     Pull changes from a remote repository to a local one.
@@ -5336,6 +5360,10 @@
     If SOURCE is omitted, the 'default' path will be used.
     See :hg:`help urls` for more information.
 
+    If multiple sources are specified, they will be pulled sequentially as if
+    the command was run multiple times. If --update is specified, the command
+    will stop at the first failed --update.
+
     Specifying bookmark as ``.`` is equivalent to specifying the active
     bookmark's name.
 
@@ -5350,101 +5378,215 @@
         hint = _(b'use hg pull followed by hg update DEST')
         raise error.InputError(msg, hint=hint)
 
-    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
-    ui.status(_(b'pulling from %s\n') % util.hidepassword(source))
-    ui.flush()
-    other = hg.peer(repo, opts, source)
-    try:
-        revs, checkout = hg.addbranchrevs(
-            repo, other, branches, opts.get(b'rev')
+    if not sources:
+        sources = [b'default']
+    for source in sources:
+        source, branches = hg.parseurl(
+            ui.expandpath(source), opts.get(b'branch')
         )
-
-        pullopargs = {}
-
-        nodes = None
-        if opts.get(b'bookmark') or revs:
-            # The list of bookmark used here is the same used to actually update
-            # the bookmark names, to avoid the race from issue 4689 and we do
-            # all lookup and bookmark queries in one go so they see the same
-            # version of the server state (issue 4700).
-            nodes = []
-            fnodes = []
-            revs = revs or []
-            if revs and not other.capable(b'lookup'):
-                err = _(
-                    b"other repository doesn't support revision lookup, "
-                    b"so a rev cannot be specified."
-                )
-                raise error.Abort(err)
-            with other.commandexecutor() as e:
-                fremotebookmarks = e.callcommand(
-                    b'listkeys', {b'namespace': b'bookmarks'}
-                )
-                for r in revs:
-                    fnodes.append(e.callcommand(b'lookup', {b'key': r}))
-            remotebookmarks = fremotebookmarks.result()
-            remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
-            pullopargs[b'remotebookmarks'] = remotebookmarks
-            for b in opts.get(b'bookmark', []):
-                b = repo._bookmarks.expandname(b)
-                if b not in remotebookmarks:
-                    raise error.InputError(
-                        _(b'remote bookmark %s not found!') % b
+        ui.status(_(b'pulling from %s\n') % util.hidepassword(source))
+        ui.flush()
+        other = hg.peer(repo, opts, source)
+        update_conflict = None
+        try:
+            revs, checkout = hg.addbranchrevs(
+                repo, other, branches, opts.get(b'rev')
+            )
+
+            pullopargs = {}
+
+            nodes = None
+            if opts.get(b'bookmark') or revs:
+                # The list of bookmarks used here is the same one used to
+                # actually update the bookmark names, to avoid the race from
+                # issue 4689; we do all lookup and bookmark queries in one go
+                # so they see the same version of the server state (issue 4700).
+                nodes = []
+                fnodes = []
+                revs = revs or []
+                if revs and not other.capable(b'lookup'):
+                    err = _(
+                        b"other repository doesn't support revision lookup, "
+                        b"so a rev cannot be specified."
+                    )
+                    raise error.Abort(err)
+                with other.commandexecutor() as e:
+                    fremotebookmarks = e.callcommand(
+                        b'listkeys', {b'namespace': b'bookmarks'}
+                    )
+                    for r in revs:
+                        fnodes.append(e.callcommand(b'lookup', {b'key': r}))
+                remotebookmarks = fremotebookmarks.result()
+                remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
+                pullopargs[b'remotebookmarks'] = remotebookmarks
+                for b in opts.get(b'bookmark', []):
+                    b = repo._bookmarks.expandname(b)
+                    if b not in remotebookmarks:
+                        raise error.InputError(
+                            _(b'remote bookmark %s not found!') % b
+                        )
+                    nodes.append(remotebookmarks[b])
+                for i, rev in enumerate(revs):
+                    node = fnodes[i].result()
+                    nodes.append(node)
+                    if rev == checkout:
+                        checkout = node
+
+            wlock = util.nullcontextmanager()
+            if opts.get(b'update'):
+                wlock = repo.wlock()
+            with wlock:
+                pullopargs.update(opts.get(b'opargs', {}))
+                modheads = exchange.pull(
+                    repo,
+                    other,
+                    heads=nodes,
+                    force=opts.get(b'force'),
+                    bookmarks=opts.get(b'bookmark', ()),
+                    opargs=pullopargs,
+                    confirm=opts.get(b'confirm'),
+                ).cgresult
+
+                # brev is a name, which might be a bookmark to be activated at
+                # the end of the update. In other words, it is an explicit
+                # destination of the update
+                brev = None
+
+                if checkout:
+                    checkout = repo.unfiltered().changelog.rev(checkout)
+
+                    # order below depends on implementation of
+                    # hg.addbranchrevs(). opts['bookmark'] is ignored,
+                    # because 'checkout' is determined without it.
+                    if opts.get(b'rev'):
+                        brev = opts[b'rev'][0]
+                    elif opts.get(b'branch'):
+                        brev = opts[b'branch'][0]
+                    else:
+                        brev = branches[0]
+                repo._subtoppath = source
+                try:
+                    update_conflict = postincoming(
+                        ui, repo, modheads, opts.get(b'update'), checkout, brev
                     )
-                nodes.append(remotebookmarks[b])
-            for i, rev in enumerate(revs):
-                node = fnodes[i].result()
-                nodes.append(node)
-                if rev == checkout:
-                    checkout = node
-
-        wlock = util.nullcontextmanager()
-        if opts.get(b'update'):
-            wlock = repo.wlock()
-        with wlock:
-            pullopargs.update(opts.get(b'opargs', {}))
-            modheads = exchange.pull(
-                repo,
-                other,
-                heads=nodes,
-                force=opts.get(b'force'),
-                bookmarks=opts.get(b'bookmark', ()),
-                opargs=pullopargs,
-                confirm=opts.get(b'confirm'),
-            ).cgresult
-
-            # brev is a name, which might be a bookmark to be activated at
-            # the end of the update. In other words, it is an explicit
-            # destination of the update
-            brev = None
-
-            if checkout:
-                checkout = repo.unfiltered().changelog.rev(checkout)
-
-                # order below depends on implementation of
-                # hg.addbranchrevs(). opts['bookmark'] is ignored,
-                # because 'checkout' is determined without it.
-                if opts.get(b'rev'):
-                    brev = opts[b'rev'][0]
-                elif opts.get(b'branch'):
-                    brev = opts[b'branch'][0]
-                else:
-                    brev = branches[0]
-            repo._subtoppath = source
-            try:
-                ret = postincoming(
-                    ui, repo, modheads, opts.get(b'update'), checkout, brev
-                )
-            except error.FilteredRepoLookupError as exc:
-                msg = _(b'cannot update to target: %s') % exc.args[0]
-                exc.args = (msg,) + exc.args[1:]
-                raise
-            finally:
-                del repo._subtoppath
-
-    finally:
-        other.close()
-    return ret
+                except error.FilteredRepoLookupError as exc:
+                    msg = _(b'cannot update to target: %s') % exc.args[0]
+                    exc.args = (msg,) + exc.args[1:]
+                    raise
+                finally:
+                    del repo._subtoppath
+
+        finally:
+            other.close()
+        # skip the remaining pull sources if there is a conflict.
+        if update_conflict:
+            break
+    if update_conflict:
+        return 1
+    else:
+        return 0
+
+
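Condensed, the multi-source loop above is equivalent to this standalone sketch (`pull_one` is a hypothetical stand-in for the per-source body: peer setup, exchange.pull, postincoming):

def pull_many(sources, pull_one):
    # with no explicit source, fall back to the 'default' path
    if not sources:
        sources = [b'default']
    for source in sources:
        update_conflict = pull_one(source)
        # a conflicted --update leaves work for the user, so the
        # remaining sources are skipped
        if update_conflict:
            return 1
    return 0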
+@command(
+    b'purge|clean',
+    [
+        (b'a', b'abort-on-err', None, _(b'abort if an error occurs')),
+        (b'', b'all', None, _(b'purge ignored files too')),
+        (b'i', b'ignored', None, _(b'purge only ignored files')),
+        (b'', b'dirs', None, _(b'purge empty directories')),
+        (b'', b'files', None, _(b'purge files')),
+        (b'p', b'print', None, _(b'print filenames instead of deleting them')),
+        (
+            b'0',
+            b'print0',
+            None,
+            _(
+                b'end filenames with NUL, for use with xargs'
+                b' (implies -p/--print)'
+            ),
+        ),
+        (b'', b'confirm', None, _(b'ask before permanently deleting files')),
+    ]
+    + cmdutil.walkopts,
+    _(b'hg purge [OPTION]... [DIR]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+)
+def purge(ui, repo, *dirs, **opts):
+    """removes files not tracked by Mercurial
+
+    Delete files not known to Mercurial. This is useful to test local
+    and uncommitted changes in an otherwise-clean source tree.
+
+    This means that purge will delete the following by default:
+
+    - Unknown files: files marked with "?" by :hg:`status`
+    - Empty directories: in fact Mercurial ignores directories unless
+      they contain files under source control management
+
+    But it will leave untouched:
+
+    - Modified and unmodified tracked files
+    - Ignored files (unless -i or --all is specified)
+    - New files added to the repository (with :hg:`add`)
+
+    The --files and --dirs options can be used to direct purge to delete
+    only files, only directories, or both. If neither option is given,
+    both will be deleted.
+
+    If directories are given on the command line, only files in these
+    directories are considered.
+
+    Be careful with purge, as you could irreversibly delete some files
+    you forgot to add to the repository. If you only want to print the
+    list of files that this program would delete, use the --print
+    option.
+    """
+    opts = pycompat.byteskwargs(opts)
+    cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
+
+    act = not opts.get(b'print')
+    eol = b'\n'
+    if opts.get(b'print0'):
+        eol = b'\0'
+        act = False  # --print0 implies --print
+    if opts.get(b'all', False):
+        ignored = True
+        unknown = True
+    else:
+        ignored = opts.get(b'ignored', False)
+        unknown = not ignored
+
+    removefiles = opts.get(b'files')
+    removedirs = opts.get(b'dirs')
+    confirm = opts.get(b'confirm')
+    if confirm is None:
+        try:
+            extensions.find(b'purge')
+            confirm = False
+        except KeyError:
+            confirm = True
+
+    if not removefiles and not removedirs:
+        removefiles = True
+        removedirs = True
+
+    match = scmutil.match(repo[None], dirs, opts)
+
+    paths = mergemod.purge(
+        repo,
+        match,
+        unknown=unknown,
+        ignored=ignored,
+        removeemptydirs=removedirs,
+        removefiles=removefiles,
+        abortonerror=opts.get(b'abort_on_err'),
+        noop=not act,
+        confirm=confirm,
+    )
+
+    for path in paths:
+        if not act:
+            ui.write(b'%s%s' % (path, eol))
 
 
 @command(
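The `--confirm` default above depends on whether the legacy purge extension is loaded. Isolated as a sketch (`default_confirm` is a hypothetical helper; `extensions.find` is the call used in the hunk above):

def default_confirm(confirm_opt):
    # an explicit --confirm/--no-confirm always wins
    if confirm_opt is not None:
        return confirm_opt
    try:
        # the legacy extension never prompted; keep that behavior when it
        # is enabled so existing scripts continue to work
        extensions.find(b'purge')
        return False
    except KeyError:
        # the built-in command asks before permanently deleting files
        return True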
@@ -5482,11 +5624,11 @@
         ),
     ]
     + remoteopts,
-    _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
+    _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]...'),
     helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
     helpbasic=True,
 )
-def push(ui, repo, dest=None, **opts):
+def push(ui, repo, *dests, **opts):
     """push changes to the specified destination
 
     Push changesets from the local repository to the specified
@@ -5522,6 +5664,9 @@
     Please see :hg:`help urls` for important details about ``ssh://``
     URLs. If DESTINATION is omitted, a default path will be used.
 
+    When passed multiple destinations, push will process them one after the
+    other, but stop should an error occur.
+
     .. container:: verbose
 
         The --pushvars option sends strings to the server that become
@@ -5566,75 +5711,97 @@
                 # this lets simultaneous -r, -b options continue working
                 opts.setdefault(b'rev', []).append(b"null")
 
-    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
-    if not path:
-        raise error.ConfigError(
-            _(b'default repository not configured!'),
-            hint=_(b"see 'hg help config.paths'"),
-        )
-    dest = path.pushloc or path.loc
-    branches = (path.branch, opts.get(b'branch') or [])
-    ui.status(_(b'pushing to %s\n') % util.hidepassword(dest))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
-    other = hg.peer(repo, opts, dest)
-
-    if revs:
-        revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
-        if not revs:
-            raise error.InputError(
-                _(b"specified revisions evaluate to an empty set"),
-                hint=_(b"use different revision arguments"),
+    if not dests:
+        dests = [None]
+    some_pushed = False
+    result = 0
+    for dest in dests:
+        path = ui.getpath(dest, default=(b'default-push', b'default'))
+        if not path:
+            raise error.ConfigError(
+                _(b'default repository not configured!'),
+                hint=_(b"see 'hg help config.paths'"),
             )
-    elif path.pushrev:
-        # It doesn't make any sense to specify ancestor revisions. So limit
-        # to DAG heads to make discovery simpler.
-        expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
-        revs = scmutil.revrange(repo, [expr])
-        revs = [repo[rev].node() for rev in revs]
-        if not revs:
-            raise error.InputError(
-                _(b'default push revset for path evaluates to an empty set')
-            )
-    elif ui.configbool(b'commands', b'push.require-revs'):
-        raise error.InputError(
-            _(b'no revisions specified to push'),
-            hint=_(b'did you mean "hg push -r ."?'),
+        dest = path.pushloc or path.loc
+        branches = (path.branch, opts.get(b'branch') or [])
+        ui.status(_(b'pushing to %s\n') % util.hidepassword(dest))
+        revs, checkout = hg.addbranchrevs(
+            repo, repo, branches, opts.get(b'rev')
         )
-
-    repo._subtoppath = dest
-    try:
-        # push subrepos depth-first for coherent ordering
-        c = repo[b'.']
-        subs = c.substate  # only repos that are committed
-        for s in sorted(subs):
-            result = c.sub(s).push(opts)
-            if result == 0:
-                return not result
-    finally:
-        del repo._subtoppath
-
-    opargs = dict(opts.get(b'opargs', {}))  # copy opargs since we may mutate it
-    opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
-
-    pushop = exchange.push(
-        repo,
-        other,
-        opts.get(b'force'),
-        revs=revs,
-        newbranch=opts.get(b'new_branch'),
-        bookmarks=opts.get(b'bookmark', ()),
-        publish=opts.get(b'publish'),
-        opargs=opargs,
-    )
-
-    result = not pushop.cgresult
-
-    if pushop.bkresult is not None:
-        if pushop.bkresult == 2:
-            result = 2
-        elif not result and pushop.bkresult:
-            result = 2
-
+        other = hg.peer(repo, opts, dest)
+
+        try:
+            if revs:
+                revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
+                if not revs:
+                    raise error.InputError(
+                        _(b"specified revisions evaluate to an empty set"),
+                        hint=_(b"use different revision arguments"),
+                    )
+            elif path.pushrev:
+                # It doesn't make any sense to specify ancestor revisions. So limit
+                # to DAG heads to make discovery simpler.
+                expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
+                revs = scmutil.revrange(repo, [expr])
+                revs = [repo[rev].node() for rev in revs]
+                if not revs:
+                    raise error.InputError(
+                        _(
+                            b'default push revset for path evaluates to an empty set'
+                        )
+                    )
+            elif ui.configbool(b'commands', b'push.require-revs'):
+                raise error.InputError(
+                    _(b'no revisions specified to push'),
+                    hint=_(b'did you mean "hg push -r ."?'),
+                )
+
+            repo._subtoppath = dest
+            try:
+                # push subrepos depth-first for coherent ordering
+                c = repo[b'.']
+                subs = c.substate  # only repos that are committed
+                for s in sorted(subs):
+                    sub_result = c.sub(s).push(opts)
+                    if sub_result == 0:
+                        return 1
+            finally:
+                del repo._subtoppath
+
+            opargs = dict(
+                opts.get(b'opargs', {})
+            )  # copy opargs since we may mutate it
+            opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
+
+            pushop = exchange.push(
+                repo,
+                other,
+                opts.get(b'force'),
+                revs=revs,
+                newbranch=opts.get(b'new_branch'),
+                bookmarks=opts.get(b'bookmark', ()),
+                publish=opts.get(b'publish'),
+                opargs=opargs,
+            )
+
+            if pushop.cgresult == 0:
+                result = 1
+            elif pushop.cgresult is not None:
+                some_pushed = True
+
+            if pushop.bkresult is not None:
+                if pushop.bkresult == 2:
+                    result = 2
+                elif not result and pushop.bkresult:
+                    result = 2
+
+            if result:
+                break
+
+        finally:
+            other.close()
+    if result == 0 and not some_pushed:
+        result = 1
     return result
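Restated outside the loop for clarity, the exit-code aggregation above follows this scheme (a sketch; `push_results` pairs each destination's cgresult and bkresult):

def aggregate_push_results(push_results):
    result = 0
    some_pushed = False
    for cgresult, bkresult in push_results:
        if cgresult == 0:
            result = 1          # this destination had nothing to push
        elif cgresult is not None:
            some_pushed = True  # changesets reached this destination
        if bkresult == 2 or (not result and bkresult):
            result = 2          # bookmark push failed or was partial
        if result:
            break               # stop at the first destination with an issue
    if result == 0 and not some_pushed:
        result = 1              # nothing was pushed anywhere
    return result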
 
 
@@ -6083,7 +6250,7 @@
             if hint:
                 ui.warn(hint)
 
-    unresolvedf = list(ms.unresolved())
+    unresolvedf = ms.unresolvedcount()
     if not unresolvedf:
         ui.status(_(b'(no more unresolved files)\n'))
         cmdutil.checkafterresolved(repo)
@@ -7428,7 +7595,10 @@
                 )
             modheads = bundle2.combinechangegroupresults(op)
 
-    return postincoming(ui, repo, modheads, opts.get('update'), None, None)
+    if postincoming(ui, repo, modheads, opts.get('update'), None, None):
+        return 1
+    else:
+        return 0
 
 
 @command(
--- a/mercurial/commit.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/commit.py	Thu Mar 18 18:24:59 2021 -0400
@@ -96,6 +96,10 @@
             ctx.date(),
             extra,
         )
+        rev = repo[n].rev()
+        if oldtip != repo.changelog.tiprev():
+            repo.register_changeset(rev, repo.changelog.changelogrevision(rev))
+
         xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
         repo.hook(
             b'pretxncommit',
@@ -108,7 +112,7 @@
         targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
 
         # prevent unmarking changesets as public on recommit
-        waspublic = oldtip == repo.changelog.tiprev() and not repo[n].phase()
+        waspublic = oldtip == repo.changelog.tiprev() and not repo[rev].phase()
 
         if targetphase and not waspublic:
             # retract boundary do not alter parent changeset.
@@ -116,7 +120,7 @@
             # be compliant anyway
             #
             # if minimal phase was 0 we don't need to retract anything
-            phases.registernew(repo, tr, targetphase, [repo[n].rev()])
+            phases.registernew(repo, tr, targetphase, [rev])
         return n
 
 
--- a/mercurial/config.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/config.py	Thu Mar 18 18:24:59 2021 -0400
@@ -22,14 +22,19 @@
 
 class config(object):
     def __init__(self, data=None):
+        self._current_source_level = 0
         self._data = {}
         self._unset = []
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
-            self._source = data._source.copy()
-        else:
-            self._source = util.cowdict()
+            self._current_source_level = data._current_source_level + 1
+
+    def new_source(self):
+        """increment the source counter
+
+        This is used to define source priority when reading"""
+        self._current_source_level += 1
 
     def copy(self):
         return config(self)
@@ -48,45 +53,66 @@
             yield d
 
     def update(self, src):
-        self._source = self._source.preparewrite()
+        current_level = self._current_source_level
+        current_level += 1
+        max_level = self._current_source_level
         for s, n in src._unset:
             ds = self._data.get(s, None)
             if ds is not None and n in ds:
                 self._data[s] = ds.preparewrite()
                 del self._data[s][n]
-                del self._source[(s, n)]
         for s in src:
             ds = self._data.get(s, None)
             if ds:
                 self._data[s] = ds.preparewrite()
             else:
                 self._data[s] = util.cowsortdict()
-            self._data[s].update(src._data[s])
-        self._source.update(src._source)
+            for k, v in src._data[s].items():
+                value, source, level = v
+                level += current_level
+                max_level = max(level, max_level)
+                self._data[s][k] = (value, source, level)
+        self._current_source_level = max_level
+
+    def _get(self, section, item):
+        return self._data.get(section, {}).get(item)
 
     def get(self, section, item, default=None):
-        return self._data.get(section, {}).get(item, default)
+        result = self._get(section, item)
+        if result is None:
+            return default
+        return result[0]
 
-    def backup(self, section, item):
+    def backup(self, section, key):
         """return a tuple allowing restore to reinstall a previous value
 
         The main reason we need it is because it handles the "no data" case.
         """
         try:
-            value = self._data[section][item]
-            source = self.source(section, item)
-            return (section, item, value, source)
+            item = self._data[section][key]
         except KeyError:
-            return (section, item)
+            return (section, key)
+        else:
+            return (section, key) + item
 
     def source(self, section, item):
-        return self._source.get((section, item), b"")
+        result = self._get(section, item)
+        if result is None:
+            return b""
+        return result[1]
+
+    def level(self, section, item):
+        result = self._get(section, item)
+        if result is None:
+            return None
+        return result[2]
 
     def sections(self):
         return sorted(self._data.keys())
 
     def items(self, section):
-        return list(pycompat.iteritems(self._data.get(section, {})))
+        items = pycompat.iteritems(self._data.get(section, {}))
+        return [(k, v[0]) for (k, v) in items]
 
     def set(self, section, item, value, source=b""):
         if pycompat.ispy3:
@@ -103,26 +129,31 @@
             self._data[section] = util.cowsortdict()
         else:
             self._data[section] = self._data[section].preparewrite()
-        self._data[section][item] = value
-        if source:
-            self._source = self._source.preparewrite()
-            self._source[(section, item)] = source
+        self._data[section][item] = (value, source, self._current_source_level)
+
+    def alter(self, section, key, new_value):
+        """alter a value without altering its source or level
+
+        This method is meant to be used by `ui.fixconfig` only."""
+        item = self._data[section][key]
+        size = len(item)
+        new_item = (new_value,) + item[1:]
+        assert len(new_item) == size
+        self._data[section][key] = new_item
 
     def restore(self, data):
         """restore data returned by self.backup"""
-        self._source = self._source.preparewrite()
-        if len(data) == 4:
+        if len(data) != 2:
             # restore old data
-            section, item, value, source = data
+            section, key = data[:2]
+            item = data[2:]
             self._data[section] = self._data[section].preparewrite()
-            self._data[section][item] = value
-            self._source[(section, item)] = source
+            self._data[section][key] = item
         else:
             # no data before, remove everything
             section, item = data
             if section in self._data:
                 self._data[section].pop(item, None)
-            self._source.pop((section, item), None)
 
     def parse(self, src, data, sections=None, remap=None, include=None):
         sectionre = util.re.compile(br'\[([^\[]+)\]')
@@ -206,6 +237,7 @@
             raise error.ConfigError(message, (b"%s:%d" % (src, line)))
 
     def read(self, path, fp=None, sections=None, remap=None):
+        self.new_source()
         if not fp:
             fp = util.posixfile(path, b'rb')
         assert (
@@ -220,6 +252,8 @@
         def include(rel, remap, sections):
             abs = os.path.normpath(os.path.join(dir, rel))
             self.read(abs, remap=remap, sections=sections)
+            # anything after the include has a higher level
+            self.new_source()
 
         self.parse(
             path, fp.read(), sections=sections, remap=remap, include=include
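The `(value, source, level)` triple gives each setting an explicit priority; reduced to a standalone sketch (`TinyConfig` is hypothetical and omits the copy-on-write details of the real class):

class TinyConfig:
    def __init__(self):
        self._level = 0      # bumped once per source file read
        self._data = {}      # (section, key) -> (value, source, level)

    def new_source(self):
        self._level += 1

    def set(self, section, key, value, source=b''):
        self._data[(section, key)] = (value, source, self._level)

    def get(self, section, key, default=None):
        item = self._data.get((section, key))
        return default if item is None else item[0]

    def level(self, section, key):
        item = self._data.get((section, key))
        return None if item is None else item[2]

cfg = TinyConfig()
cfg.new_source()   # e.g. the system-wide hgrc
cfg.set(b'ui', b'username', b'system', source=b'/etc/mercurial/hgrc')
cfg.new_source()   # e.g. the user hgrc, read later, so higher priority
cfg.set(b'ui', b'username', b'user', source=b'~/.hgrc')
assert cfg.get(b'ui', b'username') == b'user'
assert cfg.level(b'ui', b'username') == 2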
--- a/mercurial/configitems.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/configitems.py	Thu Mar 18 18:24:59 2021 -0400
@@ -570,11 +570,21 @@
     default=0,
 )
 coreconfigitem(
+    b'convert',
+    b'svn.dangerous-set-commit-dates',
+    default=False,
+)
+coreconfigitem(
     b'debug',
     b'dirstate.delaywrite',
     default=0,
 )
 coreconfigitem(
+    b'debug',
+    b'revlog.verifyposition.changelog',
+    default=b'',
+)
+coreconfigitem(
     b'defaults',
     b'.*',
     default=None,
@@ -610,6 +620,12 @@
     b'check-relroot',
     default=False,
 )
+# Track copy information for all files, not just "added" ones (very slow)
+coreconfigitem(
+    b'devel',
+    b'copy-tracing.trace-all-files',
+    default=False,
+)
 coreconfigitem(
     b'devel',
     b'default-date',
@@ -689,6 +705,11 @@
 )
 coreconfigitem(
     b'devel',
+    b'copy-tracing.multi-thread',
+    default=True,
+)
+coreconfigitem(
+    b'devel',
     b'debug.extensions',
     default=False,
 )
@@ -729,8 +750,26 @@
     b'discovery.randomize',
     default=True,
 )
+# Control the initial size of the discovery sample
+coreconfigitem(
+    b'devel',
+    b'discovery.sample-size',
+    default=200,
+)
+# Control the size of the discovery sample for the initial round
+coreconfigitem(
+    b'devel',
+    b'discovery.sample-size.initial',
+    default=100,
+)
 _registerdiffopts(section=b'diff')
 coreconfigitem(
+    b'diff',
+    b'merge',
+    default=False,
+    experimental=True,
+)
+coreconfigitem(
     b'email',
     b'bcc',
     default=None,
@@ -827,6 +866,31 @@
 )
 coreconfigitem(
     b'experimental',
+    b'bundlecompthreads',
+    default=None,
+)
+coreconfigitem(
+    b'experimental',
+    b'bundlecompthreads.bzip2',
+    default=None,
+)
+coreconfigitem(
+    b'experimental',
+    b'bundlecompthreads.gzip',
+    default=None,
+)
+coreconfigitem(
+    b'experimental',
+    b'bundlecompthreads.none',
+    default=None,
+)
+coreconfigitem(
+    b'experimental',
+    b'bundlecompthreads.zstd',
+    default=None,
+)
+coreconfigitem(
+    b'experimental',
     b'changegroup3',
     default=False,
 )
@@ -1258,6 +1322,20 @@
     b'use-persistent-nodemap',
     default=False,
 )
+# TODO needs to grow a docket file to at least store the last offset of the data
+# file when rewriting sidedata.
+# Will also need a way of dealing with garbage data if we allow rewriting
+# *existing* sidedata.
+# Exchange-wise, we will also need to do something more efficient than keeping
+# references to the affected revlogs, especially memory-wise when rewriting
+# sidedata.
+# Also... compress the sidedata? (this should be coming very soon)
+coreconfigitem(
+    b'format',
+    b'exp-revlogv2.2',
+    default=False,
+    experimental=True,
+)
 coreconfigitem(
     b'format',
     b'exp-use-copies-side-data-changeset',
--- a/mercurial/context.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/context.py	Thu Mar 18 18:24:59 2021 -0400
@@ -2599,6 +2599,7 @@
             b'flags': flags,
             b'copied': copied,
         }
+        util.clearcachedproperty(self, b'_manifest')
 
     def filectx(self, path, filelog=None):
         return overlayworkingfilectx(
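The one-line fix above applies the usual cached-property invalidation rule: code that mutates the state a cached value was derived from must also drop the cache. A plain-Python analogue (`OverlayExample` is hypothetical; `util.clearcachedproperty` plays the `__dict__.pop` role in Mercurial):

from functools import cached_property

class OverlayExample:
    def __init__(self):
        self._cache = {}

    @cached_property
    def manifest(self):
        # stands in for the expensive derivation from self._cache
        return dict(self._cache)

    def markdirty(self, path, data):
        self._cache[path] = data
        # drop the stale cached value so the next read recomputes it
        self.__dict__.pop('manifest', None)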
--- a/mercurial/copies.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/copies.py	Thu Mar 18 18:24:59 2021 -0400
@@ -59,14 +59,13 @@
     # Cases 1, 3, and 5 are then removed by _filter().
 
     for k, v in list(t.items()):
-        # remove copies from files that didn't exist
-        if v not in src:
+        if k == v:  # case 3
             del t[k]
-        # remove criss-crossed copies
-        elif k in src and v in dst:
+        elif v not in src:  # case 5
+            # remove copies from files that didn't exist
             del t[k]
-        # remove copies to files that were then removed
-        elif k not in dst:
+        elif k not in dst:  # case 1
+            # remove copies to files that were then removed
             del t[k]
 
 
@@ -153,13 +152,21 @@
     if b.p1() == a and b.p2().node() == nullid:
         filesmatcher = matchmod.exact(b.files())
         forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
-    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+    if repo.ui.configbool(b'devel', b'copy-tracing.trace-all-files'):
+        missing = list(b.walk(match))
+        # walk all files instead of relying on _computeforwardmissing()
+        if debug:
+            dbg(b'debug.copies:      searching all files: %d\n' % len(missing))
+    else:
+        missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
+        if debug:
+            dbg(
+                b'debug.copies:      missing files to search: %d\n'
+                % len(missing)
+            )
 
     ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
 
-    if debug:
-        dbg(b'debug.copies:      missing files to search: %d\n' % len(missing))
-
     for f in sorted(missing):
         if debug:
             dbg(b'debug.copies:        tracing file: %s\n' % f)
@@ -267,6 +274,7 @@
     revs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
     roots = set()
     has_graph_roots = False
+    multi_thread = repo.ui.configbool(b'devel', b'copy-tracing.multi-thread')
 
     # iterate over `only(B, A)`
     for r in revs:
@@ -314,7 +322,13 @@
                     children_count[p] += 1
         revinfo = _revinfo_getter(repo, match)
         return _combine_changeset_copies(
-            revs, children_count, b.rev(), revinfo, match, isancestor
+            revs,
+            children_count,
+            b.rev(),
+            revinfo,
+            match,
+            isancestor,
+            multi_thread,
         )
     else:
         # When not using side-data, we will process the edges "from" the parent.
@@ -339,7 +353,7 @@
 
 
 def _combine_changeset_copies(
-    revs, children_count, targetrev, revinfo, match, isancestor
+    revs, children_count, targetrev, revinfo, match, isancestor, multi_thread
 ):
     """combine the copies information for each item of iterrevs
 
@@ -356,7 +370,7 @@
 
     if rustmod is not None:
         final_copies = rustmod.combine_changeset_copies(
-            list(revs), children_count, targetrev, revinfo, isancestor
+            list(revs), children_count, targetrev, revinfo, multi_thread
         )
     else:
         isancestor = cached_is_ancestor(isancestor)
@@ -427,7 +441,11 @@
                     # potential filelog related behavior.
                     assert parent == 2
                     current_copies = _merge_copies_dict(
-                        newcopies, current_copies, isancestor, changes
+                        newcopies,
+                        current_copies,
+                        isancestor,
+                        changes,
+                        current_rev,
                     )
             all_copies[current_rev] = current_copies
 
@@ -449,7 +467,7 @@
 PICK_EITHER = 2
 
 
-def _merge_copies_dict(minor, major, isancestor, changes):
+def _merge_copies_dict(minor, major, isancestor, changes, current_merge):
     """merge two copies-mapping together, minor and major
 
     In case of conflict, value from "major" will be picked.
@@ -467,39 +485,75 @@
         if other is None:
             minor[dest] = value
         else:
-            pick = _compare_values(changes, isancestor, dest, other, value)
-            if pick == PICK_MAJOR:
+            pick, overwrite = _compare_values(
+                changes, isancestor, dest, other, value
+            )
+            if overwrite:
+                if pick == PICK_MAJOR:
+                    minor[dest] = (current_merge, value[1])
+                else:
+                    minor[dest] = (current_merge, other[1])
+            elif pick == PICK_MAJOR:
                 minor[dest] = value
     return minor
 
 
 def _compare_values(changes, isancestor, dest, minor, major):
-    """compare two value within a _merge_copies_dict loop iteration"""
+    """compare two value within a _merge_copies_dict loop iteration
+
+    return (pick, overwrite).
+
+    - pick is one of PICK_MINOR, PICK_MAJOR or PICK_EITHER
+    - overwrite is True if the pick resolved an ambiguity; the surviving
+      value is then recorded with the current merge revision as its origin.
+    """
     major_tt, major_value = major
     minor_tt, minor_value = minor
 
-    # evacuate some simple case first:
     if major_tt == minor_tt:
         # if it comes from the same revision it must be the same value
         assert major_value == minor_value
-        return PICK_EITHER
-    elif major[1] == minor[1]:
-        return PICK_EITHER
-
-    # actual merging needed: content from "major" wins, unless it is older than
-    # the branch point or there is a merge
-    elif changes is not None and major[1] is None and dest in changes.salvaged:
-        return PICK_MINOR
-    elif changes is not None and minor[1] is None and dest in changes.salvaged:
-        return PICK_MAJOR
-    elif changes is not None and dest in changes.merged:
-        return PICK_MAJOR
-    elif not isancestor(major_tt, minor_tt):
-        if major[1] is not None:
-            return PICK_MAJOR
-        elif isancestor(minor_tt, major_tt):
-            return PICK_MAJOR
-    return PICK_MINOR
+        return PICK_EITHER, False
+    elif (
+        changes is not None
+        and minor_value is not None
+        and major_value is None
+        and dest in changes.salvaged
+    ):
+        # In this case, a deletion was reverted: the "alive" value overwrites
+        # the deleted one.
+        return PICK_MINOR, True
+    elif (
+        changes is not None
+        and major_value is not None
+        and minor_value is None
+        and dest in changes.salvaged
+    ):
+        # In this case, a deletion was reverted: the "alive" value overwrites
+        # the deleted one.
+        return PICK_MAJOR, True
+    elif isancestor(minor_tt, major_tt):
+        if changes is not None and dest in changes.merged:
+            # change to dest happened on the branch without copy-source change,
+            # so both source are valid and "major" wins.
+            return PICK_MAJOR, True
+        else:
+            return PICK_MAJOR, False
+    elif isancestor(major_tt, minor_tt):
+        if changes is not None and dest in changes.merged:
+            # change to dest happened on the branch without copy-source change,
+            # so both source are valid and "major" wins.
+            return PICK_MAJOR, True
+        else:
+            return PICK_MINOR, False
+    elif minor_value is None:
+        # in case of conflict, the "alive" side wins.
+        return PICK_MAJOR, True
+    elif major_value is None:
+        # in case of conflict, the "alive" side wins.
+        return PICK_MINOR, True
+    else:
+        # in case of conflict where both side are alive, major wins.
+        return PICK_MAJOR, True
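A toy check of the (pick, overwrite) contract above (assumes the PICK_* constants and `_compare_values` from this hunk; the two-revision DAG is made up):

def fake_isancestor(a, b):
    # toy DAG: revision 1 is an ancestor of revision 2, nothing else
    return (a, b) == (1, 2)

# minor comes from an ancestor of major: major wins, nothing to overwrite
assert _compare_values(
    None, fake_isancestor, b'f', (1, b'src'), (2, b'src')
) == (PICK_MAJOR, False)
# unrelated revisions, both values alive: major wins and overwrites
assert _compare_values(
    None, fake_isancestor, b'f', (3, b'src'), (4, b'other')
) == (PICK_MAJOR, True)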
 
 
 def _revinfo_getter_extra(repo):
@@ -650,22 +704,28 @@
 
 
 def _backwardrenames(a, b, match):
+    """find renames from a to b"""
     if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
         return {}
 
+    # We don't want to pass in "match" here, since that would filter
+    # the destination by it. Since we're reversing the copies, we want
+    # to filter the source instead.
+    copies = _forwardcopies(b, a)
+    return _reverse_renames(copies, a, match)
+
+
+def _reverse_renames(copies, dst, match):
+    """given copies to context 'dst', finds renames from that context"""
     # Even though we're not taking copies into account, 1:n rename situations
     # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
     # arbitrarily pick one of the renames.
-    # We don't want to pass in "match" here, since that would filter
-    # the destination by it. Since we're reversing the copies, we want
-    # to filter the source instead.
-    f = _forwardcopies(b, a)
     r = {}
-    for k, v in sorted(pycompat.iteritems(f)):
+    for k, v in sorted(pycompat.iteritems(copies)):
         if match and not match(v):
             continue
         # remove copies
-        if v in a:
+        if v in dst:
             continue
         r[v] = k
     return r
@@ -701,9 +761,17 @@
         base = None
         if a.rev() != nullrev:
             base = x
+        x_copies = _forwardcopies(a, x)
+        y_copies = _forwardcopies(a, y, base, match=match)
+        same_keys = set(x_copies) & set(y_copies)
+        for k in same_keys:
+            if x_copies.get(k) == y_copies.get(k):
+                del x_copies[k]
+                del y_copies[k]
+        x_backward_renames = _reverse_renames(x_copies, x, match)
         copies = _chain(
-            _backwardrenames(x, a, match=match),
-            _forwardcopies(a, y, base, match=match),
+            x_backward_renames,
+            y_copies,
         )
     _filter(x, y, copies)
     return copies
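The common-key elimination above drops copies both sides agree on before reversing and chaining; a toy illustration:

x_copies = {b'b': b'a', b'd': b'c'}   # copies from base to x
y_copies = {b'b': b'a', b'f': b'e'}   # copies from base to y
for k in set(x_copies) & set(y_copies):
    if x_copies.get(k) == y_copies.get(k):
        # identical on both sides: cancels out
        del x_copies[k]
        del y_copies[k]
assert x_copies == {b'd': b'c'}
assert y_copies == {b'f': b'e'}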
@@ -1042,11 +1110,17 @@
             b"   discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
         )
 
+    # Sort the directories in reverse order, so we find children first
+    # For example, if dir1/ was renamed to dir2/, and dir1/subdir1/
+    # was renamed to dir2/subdir2/, we want to move dir1/subdir1/file
+    # to dir2/subdir2/file (not dir2/subdir1/file)
+    dirmove_children_first = sorted(dirmove, reverse=True)
+
     movewithdir = {}
     # check unaccounted nonoverlapping files against directory moves
     for f in addedfilesfn():
         if f not in fullcopy:
-            for d in dirmove:
+            for d in dirmove_children_first:
                 if f.startswith(d):
                     # new file added in a directory that was moved, move it
                     df = dirmove[d] + f[len(d) :]
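A toy illustration of why the reverse sort matters (directory names are made up):

dirmove = {
    b'dir1/': b'dir2/',
    b'dir1/subdir1/': b'dir2/subdir2/',
}
f = b'dir1/subdir1/file'
for d in sorted(dirmove, reverse=True):   # b'dir1/subdir1/' is tried first
    if f.startswith(d):
        # the child directory mapping wins, not the parent's
        assert dirmove[d] + f[len(d):] == b'dir2/subdir2/file'
        break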
@@ -1220,6 +1294,15 @@
     by merge.update().
     """
     new_copies = pathcopies(base, ctx)
-    _filter(wctx.p1(), wctx, new_copies)
+    parent = wctx.p1()
+    _filter(parent, wctx, new_copies)
+    # Extra filtering to drop copy information for files that existed before
+    # the graft. This is to handle the case of grafting a rename onto a commit
+    # that already has the rename. Otherwise the presence of copy information
+    # would result in the creation of an empty commit where we would prefer to
+    # not create one.
+    for dest, __ in list(new_copies.items()):
+        if dest in parent:
+            del new_copies[dest]
     for dst, src in pycompat.iteritems(new_copies):
         wctx[dst].markcopied(src)
--- a/mercurial/debugcommands.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/debugcommands.py	Thu Mar 18 18:24:59 2021 -0400
@@ -9,6 +9,7 @@
 
 import codecs
 import collections
+import contextlib
 import difflib
 import errno
 import glob
@@ -69,6 +70,7 @@
     pycompat,
     registrar,
     repair,
+    repoview,
     revlog,
     revset,
     revsetlang,
@@ -345,7 +347,7 @@
         def showchunks(named):
             ui.write(b"\n%s%s\n" % (indent_string, named))
             for deltadata in gen.deltaiter():
-                node, p1, p2, cs, deltabase, delta, flags = deltadata
+                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                 ui.write(
                     b"%s%s %s %s %s %s %d\n"
                     % (
@@ -371,7 +373,7 @@
             raise error.Abort(_(b'use debugbundle2 for this file'))
         gen.changelogheader()
         for deltadata in gen.deltaiter():
-            node, p1, p2, cs, deltabase, delta, flags = deltadata
+            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
             ui.write(b"%s%s\n" % (indent_string, hex(node)))
 
 
@@ -470,27 +472,47 @@
     """lists the capabilities of a remote peer"""
     opts = pycompat.byteskwargs(opts)
     peer = hg.peer(ui, opts, path)
-    caps = peer.capabilities()
-    ui.writenoi18n(b'Main capabilities:\n')
-    for c in sorted(caps):
-        ui.write(b'  %s\n' % c)
-    b2caps = bundle2.bundle2caps(peer)
-    if b2caps:
-        ui.writenoi18n(b'Bundle2 capabilities:\n')
-        for key, values in sorted(pycompat.iteritems(b2caps)):
-            ui.write(b'  %s\n' % key)
-            for v in values:
-                ui.write(b'    %s\n' % v)
-
-
-@command(b'debugchangedfiles', [], b'REV')
-def debugchangedfiles(ui, repo, rev):
+    try:
+        caps = peer.capabilities()
+        ui.writenoi18n(b'Main capabilities:\n')
+        for c in sorted(caps):
+            ui.write(b'  %s\n' % c)
+        b2caps = bundle2.bundle2caps(peer)
+        if b2caps:
+            ui.writenoi18n(b'Bundle2 capabilities:\n')
+            for key, values in sorted(pycompat.iteritems(b2caps)):
+                ui.write(b'  %s\n' % key)
+                for v in values:
+                    ui.write(b'    %s\n' % v)
+    finally:
+        peer.close()
+
+
+@command(
+    b'debugchangedfiles',
+    [
+        (
+            b'',
+            b'compute',
+            False,
+            b"compute information instead of reading it from storage",
+        ),
+    ],
+    b'REV',
+)
+def debugchangedfiles(ui, repo, rev, **opts):
     """list the stored files changes for a revision"""
     ctx = scmutil.revsingle(repo, rev, None)
-    sd = repo.changelog.sidedata(ctx.rev())
-    files_block = sd.get(sidedata.SD_FILES)
-    if files_block is not None:
-        files = metadata.decode_files_sidedata(sd)
+    files = None
+
+    if opts['compute']:
+        files = metadata.compute_all_files_changes(ctx)
+    else:
+        sd = repo.changelog.sidedata(ctx.rev())
+        files_block = sd.get(sidedata.SD_FILES)
+        if files_block is not None:
+            files = metadata.decode_files_sidedata(sd)
+    if files is not None:
         for f in sorted(files.touched):
             if f in files.added:
                 action = b"added"
@@ -964,20 +986,74 @@
         ),
         (b'', b'rev', [], b'restrict discovery to this set of revs'),
         (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
+        (
+            b'',
+            b'local-as-revs',
+            b"",
+            b'treat local as having these revisions only',
+        ),
+        (
+            b'',
+            b'remote-as-revs',
+            b"",
+            b'use local as remote, with only these revisions',
+        ),
     ]
-    + cmdutil.remoteopts,
+    + cmdutil.remoteopts
+    + cmdutil.formatteropts,
     _(b'[--rev REV] [OTHER]'),
 )
 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
-    """runs the changeset discovery protocol in isolation"""
+    """runs the changeset discovery protocol in isolation
+
+    The local peer can be "replaced" by a subset of the local repository by
+    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
+    can be "replaced" by a subset of the local repository using the
+    `--remote-as-revs` flag. This is useful to efficiently debug pathological
+    discovery situations.
+    """
     opts = pycompat.byteskwargs(opts)
-    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
-    remote = hg.peer(repo, opts, remoteurl)
-    ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
+    unfi = repo.unfiltered()
+
+    # setup potential extra filtering
+    local_revs = opts[b"local_as_revs"]
+    remote_revs = opts[b"remote_as_revs"]
 
     # make sure tests are repeatable
     random.seed(int(opts[b'seed']))
 
+    if not remote_revs:
+
+        remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
+        remote = hg.peer(repo, opts, remoteurl)
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
+    else:
+        branches = (None, [])
+        remote_filtered_revs = scmutil.revrange(
+            unfi, [b"not (::(%s))" % remote_revs]
+        )
+        remote_filtered_revs = frozenset(remote_filtered_revs)
+
+        def remote_func(x):
+            return remote_filtered_revs
+
+        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
+
+        remote = repo.peer()
+        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
+
+    if local_revs:
+        local_filtered_revs = scmutil.revrange(
+            unfi, [b"not (::(%s))" % local_revs]
+        )
+        local_filtered_revs = frozenset(local_filtered_revs)
+
+        def local_func(x):
+            return local_filtered_revs
+
+        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
+        repo = repo.filtered(b'debug-discovery-local-filter')
+
     data = {}
     if opts.get(b'old'):
 
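The peer-faking above works because repoview filters are plain functions in a registry; sketched as a standalone helper (`hide_outside_of` is hypothetical, mirroring the single-argument filter functions registered above):

def hide_outside_of(repo, revspec, name):
    # everything outside the ancestors of `revspec` becomes hidden, so the
    # filtered view behaves like a repository containing only ::revspec
    unfi = repo.unfiltered()
    hidden = frozenset(scmutil.revrange(unfi, [b"not (::(%s))" % revspec]))
    repoview.filtertable[name] = lambda x: hidden
    return repo.filtered(name)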
@@ -1014,8 +1090,21 @@
 
     remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
     localrevs = opts[b'rev']
-    with util.timedcm('debug-discovery') as t:
-        common, hds = doit(localrevs, remoterevs)
+
+    fm = ui.formatter(b'debugdiscovery', opts)
+    if fm.strict_format:
+
+        @contextlib.contextmanager
+        def may_capture_output():
+            ui.pushbuffer()
+            yield
+            data[b'output'] = ui.popbuffer()
+
+    else:
+        may_capture_output = util.nullcontextmanager
+    with may_capture_output():
+        with util.timedcm('debug-discovery') as t:
+            common, hds = doit(localrevs, remoterevs)
 
     # compute all statistics
     heads_common = set(common)
@@ -1066,50 +1155,41 @@
     data[b'nb-ini_und-common'] = len(common_initial_undecided)
     data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
 
+    fm.startitem()
+    fm.data(**pycompat.strkwargs(data))
     # display discovery summary
-    ui.writenoi18n(b"elapsed time:  %(elapsed)f seconds\n" % data)
-    ui.writenoi18n(b"round-trips:           %(total-roundtrips)9d\n" % data)
-    ui.writenoi18n(b"heads summary:\n")
-    ui.writenoi18n(b"  total common heads:  %(nb-common-heads)9d\n" % data)
-    ui.writenoi18n(
-        b"    also local heads:  %(nb-common-heads-local)9d\n" % data
-    )
-    ui.writenoi18n(
-        b"    also remote heads: %(nb-common-heads-remote)9d\n" % data
-    )
-    ui.writenoi18n(b"    both:              %(nb-common-heads-both)9d\n" % data)
-    ui.writenoi18n(b"  local heads:         %(nb-head-local)9d\n" % data)
-    ui.writenoi18n(
-        b"    common:            %(nb-common-heads-local)9d\n" % data
-    )
-    ui.writenoi18n(
-        b"    missing:           %(nb-head-local-missing)9d\n" % data
-    )
-    ui.writenoi18n(b"  remote heads:        %(nb-head-remote)9d\n" % data)
-    ui.writenoi18n(
-        b"    common:            %(nb-common-heads-remote)9d\n" % data
-    )
-    ui.writenoi18n(
-        b"    unknown:           %(nb-head-remote-unknown)9d\n" % data
-    )
-    ui.writenoi18n(b"local changesets:      %(nb-revs)9d\n" % data)
-    ui.writenoi18n(b"  common:              %(nb-revs-common)9d\n" % data)
-    ui.writenoi18n(b"    heads:             %(nb-common-heads)9d\n" % data)
-    ui.writenoi18n(b"    roots:             %(nb-common-roots)9d\n" % data)
-    ui.writenoi18n(b"  missing:             %(nb-revs-missing)9d\n" % data)
-    ui.writenoi18n(b"    heads:             %(nb-missing-heads)9d\n" % data)
-    ui.writenoi18n(b"    roots:             %(nb-missing-roots)9d\n" % data)
-    ui.writenoi18n(b"  first undecided set: %(nb-ini_und)9d\n" % data)
-    ui.writenoi18n(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
-    ui.writenoi18n(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
-    ui.writenoi18n(b"    common:            %(nb-ini_und-common)9d\n" % data)
-    ui.writenoi18n(b"    missing:           %(nb-ini_und-missing)9d\n" % data)
+    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
+    fm.plain(b"round-trips:           %(total-roundtrips)9d\n" % data)
+    fm.plain(b"heads summary:\n")
+    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
+    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
+    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
+    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
+    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
+    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
+    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
+    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
+    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
+    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
+    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
+    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
+    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
+    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
+    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
+    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
+    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
+    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
+    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
+    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
+    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
+    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)
 
     if ui.verbose:
-        ui.writenoi18n(
+        fm.plain(
             b"common heads: %s\n"
             % b" ".join(sorted(short(n) for n in heads_common))
         )
+    fm.end()
 
 
 _chunksize = 4 << 10
@@ -2214,9 +2294,9 @@
             b'',
             b'dump-new',
             False,
-            _(b'write a (new) persistent binary nodemap on stdin'),
+            _(b'write a (new) persistent binary nodemap on stdout'),
         ),
-        (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
+        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
         (
             b'',
             b'check',
@@ -2546,12 +2626,17 @@
     with ui.configoverride(overrides):
         peer = hg.peer(ui, {}, path)
 
-        local = peer.local() is not None
-        canpush = peer.canpush()
-
-        ui.write(_(b'url: %s\n') % peer.url())
-        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
-        ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
+        try:
+            local = peer.local() is not None
+            canpush = peer.canpush()
+
+            ui.write(_(b'url: %s\n') % peer.url())
+            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
+            ui.write(
+                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
+            )
+        finally:
+            peer.close()
 
 
 @command(
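The try/finally added to `debugpeer` is the pattern this changeset threads through many commands: peers opened with `hg.peer()` are now closed explicitly on every code path. In isolation, and assuming `ui` and a `path`, the pattern is:

    peer = hg.peer(ui, {}, path)
    try:
        ui.write(_(b'url: %s\n') % peer.url())
    finally:
        peer.close()  # release the connection even if the body raises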
@@ -2654,26 +2739,30 @@
     """
 
     target = hg.peer(ui, {}, repopath)
-    if keyinfo:
-        key, old, new = keyinfo
-        with target.commandexecutor() as e:
-            r = e.callcommand(
-                b'pushkey',
-                {
-                    b'namespace': namespace,
-                    b'key': key,
-                    b'old': old,
-                    b'new': new,
-                },
-            ).result()
-
-        ui.status(pycompat.bytestr(r) + b'\n')
-        return not r
-    else:
-        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
-            ui.write(
-                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
-            )
+    try:
+        if keyinfo:
+            key, old, new = keyinfo
+            with target.commandexecutor() as e:
+                r = e.callcommand(
+                    b'pushkey',
+                    {
+                        b'namespace': namespace,
+                        b'key': key,
+                        b'old': old,
+                        b'new': new,
+                    },
+                ).result()
+
+            ui.status(pycompat.bytestr(r) + b'\n')
+            return not r
+        else:
+            for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
+                ui.write(
+                    b"%s\t%s\n"
+                    % (stringutil.escapestr(k), stringutil.escapestr(v))
+                )
+    finally:
+        target.close()
 
 
 @command(b'debugpvec', [], _(b'A B'))
@@ -3719,6 +3808,23 @@
         ui.writenoi18n(b' revision %s\n' % v[1])
 
 
+@command(b'debugshell', optionalrepo=True)
+def debugshell(ui, repo):
+    """run an interactive Python interpreter
+
+    The local namespace is provided with a reference to the ui and
+    the repo instance (if available).
+    """
+    import code
+
+    imported_objects = {
+        'ui': ui,
+        'repo': repo,
+    }
+
+    code.interact(local=imported_objects)
+
+
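+# `code.interact()` starts a standard Python REPL whose local namespace is
+# the `imported_objects` dict, so `ui` and `repo` are directly usable.
+# An illustrative session (values hypothetical):
+#
+#   $ hg debugshell -R /path/to/repo
+#   >>> len(repo)            # number of changesets
+#   42
+#   >>> ui.status(b'tip: %s\n' % repo[b'tip'].hex())
+#   tip: ...
+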
 @command(
     b'debugsuccessorssets',
     [(b'', b'closest', False, _(b'return closest successors sets only'))],
@@ -3779,10 +3885,19 @@
 def debugtagscache(ui, repo):
     """display the contents of .hg/cache/hgtagsfnodes1"""
     cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
+    flog = repo.file(b'.hgtags')
     for r in repo:
         node = repo[r].node()
         tagsnode = cache.getfnode(node, computemissing=False)
-        tagsnodedisplay = hex(tagsnode) if tagsnode else b'missing/invalid'
+        if tagsnode:
+            tagsnodedisplay = hex(tagsnode)
+            if not flog.hasnode(tagsnode):
+                tagsnodedisplay += b' (unknown node)'
+        elif tagsnode is None:
+            tagsnodedisplay = b'missing'
+        else:
+            tagsnodedisplay = b'invalid'
+
         ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
 
 
@@ -4000,19 +4115,22 @@
 def debugwireargs(ui, repopath, *vals, **opts):
     opts = pycompat.byteskwargs(opts)
     repo = hg.peer(ui, opts, repopath)
-    for opt in cmdutil.remoteopts:
-        del opts[opt[1]]
-    args = {}
-    for k, v in pycompat.iteritems(opts):
-        if v:
-            args[k] = v
-    args = pycompat.strkwargs(args)
-    # run twice to check that we don't mess up the stream for the next command
-    res1 = repo.debugwireargs(*vals, **args)
-    res2 = repo.debugwireargs(*vals, **args)
-    ui.write(b"%s\n" % res1)
-    if res1 != res2:
-        ui.warn(b"%s\n" % res2)
+    try:
+        for opt in cmdutil.remoteopts:
+            del opts[opt[1]]
+        args = {}
+        for k, v in pycompat.iteritems(opts):
+            if v:
+                args[k] = v
+        args = pycompat.strkwargs(args)
+        # run twice to check that we don't mess up the stream for the next command
+        res1 = repo.debugwireargs(*vals, **args)
+        res2 = repo.debugwireargs(*vals, **args)
+        ui.write(b"%s\n" % res1)
+        if res1 != res2:
+            ui.warn(b"%s\n" % res2)
+    finally:
+        repo.close()
 
 
 def _parsewirelangblocks(fh):
--- a/mercurial/dirstate.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/dirstate.py	Thu Mar 18 18:24:59 2021 -0400
@@ -73,13 +73,16 @@
 
 @interfaceutil.implementer(intdirstate.idirstate)
 class dirstate(object):
-    def __init__(self, opener, ui, root, validate, sparsematchfn):
+    def __init__(
+        self, opener, ui, root, validate, sparsematchfn, nodeconstants
+    ):
         """Create a new dirstate object.
 
         opener is an open()-like callable that can be used to open the
         dirstate file; root is the root of the directory tracked by
         the dirstate.
         """
+        self._nodeconstants = nodeconstants
         self._opener = opener
         self._validate = validate
         self._root = root
@@ -136,7 +139,9 @@
     @propertycache
     def _map(self):
         """Return the dirstate contents (see documentation for dirstatemap)."""
-        self._map = self._mapcls(self._ui, self._opener, self._root)
+        self._map = self._mapcls(
+            self._ui, self._opener, self._root, self._nodeconstants
+        )
         return self._map
 
     @property
@@ -1425,12 +1430,13 @@
       denormalized form that they appear as in the dirstate.
     """
 
-    def __init__(self, ui, opener, root):
+    def __init__(self, ui, opener, root, nodeconstants):
         self._ui = ui
         self._opener = opener
         self._root = root
         self._filename = b'dirstate'
         self._nodelen = 20
+        self._nodeconstants = nodeconstants
 
         self._parents = None
         self._dirtyparents = False
@@ -1729,7 +1735,8 @@
 if rustmod is not None:
 
     class dirstatemap(object):
-        def __init__(self, ui, opener, root):
+        def __init__(self, ui, opener, root, nodeconstants):
+            self._nodeconstants = nodeconstants
             self._ui = ui
             self._opener = opener
             self._root = root
--- a/mercurial/discovery.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/discovery.py	Thu Mar 18 18:24:59 2021 -0400
@@ -270,9 +270,12 @@
     # C. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache(
-        (branch, heads[1])
-        for branch, heads in pycompat.iteritems(headssum)
-        if heads[0] is not None
+        repo,
+        (
+            (branch, heads[1])
+            for branch, heads in pycompat.iteritems(headssum)
+            if heads[0] is not None
+        ),
     )
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in pycompat.iteritems(newmap):
--- a/mercurial/error.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/error.py	Thu Mar 18 18:24:59 2021 -0400
@@ -60,6 +60,7 @@
 
 class SidedataHashError(RevlogError):
     def __init__(self, key, expected, got):
+        self.hint = None
         self.sidedatakey = key
         self.expecteddigest = expected
         self.actualdigest = got
@@ -77,9 +78,9 @@
         # Python 2.6+ complain about the 'message' property being deprecated
         self.lookupmessage = message
         if isinstance(name, bytes) and len(name) == 20:
-            from .node import short
+            from .node import hex
 
-            name = short(name)
+            name = hex(name)
         # if name is a binary node, it can be None
         RevlogError.__init__(
             self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
--- a/mercurial/exchange.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/exchange.py	Thu Mar 18 18:24:59 2021 -0400
@@ -420,7 +420,20 @@
                 b'unbundle wire protocol command'
             )
         )
-
+    for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
+        # Check that a computer is registered for that category for at least
+        # one revlog kind.
+        for kind, computers in repo._sidedata_computers.items():
+            if computers.get(category):
+                break
+        else:
+            raise error.Abort(
+                _(
+                    b'cannot push: required sidedata category not supported'
+                    b" by this client: '%s'"
+                )
+                % pycompat.bytestr(category)
+            )
     # get lock as we might write phase data
     wlock = lock = None
     try:
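The category check above leans on Python's `for`/`else`: the `else` suite runs only when the loop finishes without `break`, i.e. when no revlog kind has a computer registered for the category. A standalone sketch with hypothetical data:

    computers_by_kind = {b'changelog': {b'cat-a': ((), None)}}
    category = b'cat-b'
    for kind, computers in computers_by_kind.items():
        if computers.get(category):
            break
    else:
        # reached only because no kind could serve the category
        raise LookupError('unsupported sidedata category: %r' % category)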
@@ -814,7 +827,7 @@
     data = []
     for book, old, new in pushop.outbookmarks:
         data.append((book, old))
-    checkdata = bookmod.binaryencode(data)
+    checkdata = bookmod.binaryencode(pushop.repo, data)
     bundler.newpart(b'check:bookmarks', data=checkdata)
 
 
@@ -865,8 +878,15 @@
         if not cgversions:
             raise error.Abort(_(b'no common changegroup version'))
         version = max(cgversions)
+
+    remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
     cgstream = changegroup.makestream(
-        pushop.repo, pushop.outgoing, version, b'push'
+        pushop.repo,
+        pushop.outgoing,
+        version,
+        b'push',
+        bundlecaps=b2caps,
+        remote_sidedata=remote_sidedata,
     )
     cgpart = bundler.newpart(b'changegroup', data=cgstream)
     if cgversions:
@@ -1007,7 +1027,7 @@
         _abortonsecretctx(pushop, new, book)
         data.append((book, new))
         allactions.append((book, _bmaction(old, new)))
-    checkdata = bookmod.binaryencode(data)
+    checkdata = bookmod.binaryencode(pushop.repo, data)
     bundler.newpart(b'bookmarks', data=checkdata)
 
     def handlereply(op):
@@ -1135,9 +1155,9 @@
         except error.BundleValueError as exc:
             raise error.Abort(_(b'missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
-            pushop.ui.status(_(b'remote: %s\n') % exc)
+            pushop.ui.error(_(b'remote: %s\n') % exc)
             if exc.hint is not None:
-                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
+                pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
             raise error.Abort(_(b'push failed on remote'))
     except error.PushkeyFailed as exc:
         partid = int(exc.partid)
@@ -1607,6 +1627,23 @@
             ) % (b', '.join(sorted(missing)))
             raise error.Abort(msg)
 
+    for category in repo._wanted_sidedata:
+        # Check that a computer is registered for that category for at least
+        # one revlog kind.
+        for kind, computers in repo._sidedata_computers.items():
+            if computers.get(category):
+                break
+        else:
+            # This should never happen since repos are supposed to be able to
+            # generate the sidedata they require.
+            raise error.ProgrammingError(
+                _(
+                    b'sidedata category requested by local side without local'
+                    b"support: '%s'"
+                )
+                % pycompat.bytestr(category)
+            )
+
     pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
     wlock = util.nullcontextmanager()
     if not bookmod.bookmarksinstore(repo):
@@ -1820,6 +1857,10 @@
             pullop.stepsdone.add(b'obsmarkers')
     _pullbundle2extraprepare(pullop, kwargs)
 
+    remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
+    if remote_sidedata:
+        kwargs[b'remote_sidedata'] = remote_sidedata
+
     with pullop.remote.commandexecutor() as e:
         args = dict(kwargs)
         args[b'source'] = b'pull'
@@ -1832,7 +1873,7 @@
             op.modes[b'bookmarks'] = b'records'
             bundle2.processbundle(pullop.repo, bundle, op=op)
         except bundle2.AbortFromPart as exc:
-            pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
+            pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
             raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
         except error.BundleValueError as exc:
             raise error.Abort(_(b'missing support for %s') % exc)
@@ -2249,7 +2290,13 @@
 
 
 def getbundlechunks(
-    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
+    repo,
+    source,
+    heads=None,
+    common=None,
+    bundlecaps=None,
+    remote_sidedata=None,
+    **kwargs
 ):
     """Return chunks constituting a bundle's raw data.
 
@@ -2279,7 +2326,12 @@
         return (
             info,
             changegroup.makestream(
-                repo, outgoing, b'01', source, bundlecaps=bundlecaps
+                repo,
+                outgoing,
+                b'01',
+                source,
+                bundlecaps=bundlecaps,
+                remote_sidedata=remote_sidedata,
             ),
         )
 
@@ -2303,6 +2355,7 @@
             source,
             bundlecaps=bundlecaps,
             b2caps=b2caps,
+            remote_sidedata=remote_sidedata,
             **pycompat.strkwargs(kwargs)
         )
 
@@ -2325,6 +2378,7 @@
     b2caps=None,
     heads=None,
     common=None,
+    remote_sidedata=None,
     **kwargs
 ):
     """add a changegroup part to the requested bundle"""
@@ -2355,7 +2409,13 @@
         matcher = None
 
     cgstream = changegroup.makestream(
-        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
+        repo,
+        outgoing,
+        version,
+        source,
+        bundlecaps=bundlecaps,
+        matcher=matcher,
+        remote_sidedata=remote_sidedata,
     )
 
     part = bundler.newpart(b'changegroup', data=cgstream)
@@ -2369,6 +2429,8 @@
 
     if b'exp-sidedata-flag' in repo.requirements:
         part.addparam(b'exp-sidedata', b'1')
+        sidedata = bundle2.format_remote_wanted_sidedata(repo)
+        part.addparam(b'exp-wanted-sidedata', sidedata)
 
     if (
         kwargs.get('narrow', False)
@@ -2393,7 +2455,7 @@
     if not b2caps or b'bookmarks' not in b2caps:
         raise error.Abort(_(b'no common bookmarks exchange method'))
     books = bookmod.listbinbookmarks(repo)
-    data = bookmod.binaryencode(books)
+    data = bookmod.binaryencode(repo, books)
     if data:
         bundler.newpart(b'bookmarks', data=data)
 
--- a/mercurial/exchangev2.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/exchangev2.py	Thu Mar 18 18:24:59 2021 -0400
@@ -22,6 +22,7 @@
     narrowspec,
     phases,
     pycompat,
+    requirements as requirementsmod,
     setdiscovery,
 )
 from .interfaces import repository
@@ -183,7 +184,7 @@
 
     # TODO This is super hacky. There needs to be a storage API for this. We
     # also need to check for compatibility with the remote.
-    if b'revlogv1' not in repo.requirements:
+    if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
         return False
 
     return True
@@ -358,18 +359,20 @@
         # Linkrev for changelog is always self.
         return len(cl)
 
-    def ondupchangeset(cl, node):
-        added.append(node)
+    def ondupchangeset(cl, rev):
+        added.append(cl.node(rev))
 
-    def onchangeset(cl, node):
+    def onchangeset(cl, rev):
         progress.increment()
 
-        revision = cl.changelogrevision(node)
-        added.append(node)
+        revision = cl.changelogrevision(rev)
+        added.append(cl.node(rev))
 
         # We need to preserve the mapping of changelog revision to node
         # so we can set the linkrev accordingly when manifests are added.
-        manifestnodes[cl.rev(node)] = revision.manifest
+        manifestnodes[rev] = revision.manifest
+
+        repo.register_changeset(rev, revision)
 
     nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
     remotebookmarks = {}
@@ -414,12 +417,15 @@
                 mdiff.trivialdiffheader(len(data)) + data,
                 # Flags not yet supported.
                 0,
+                # Sidedata not yet supported
+                {},
             )
 
     cl.addgroup(
         iterrevisions(),
         linkrev,
         weakref.proxy(tr),
+        alwayscache=True,
         addrevisioncb=onchangeset,
         duplicaterevisioncb=ondupchangeset,
     )
@@ -492,6 +498,8 @@
                 delta,
                 # Flags not yet supported.
                 0,
+                # Sidedata not yet supported.
+                {},
             )
 
             progress.increment()
@@ -533,8 +541,8 @@
             # Chomp off header object.
             next(objs)
 
-            def onchangeset(cl, node):
-                added.append(node)
+            def onchangeset(cl, rev):
+                added.append(cl.node(rev))
 
             rootmanifest.addgroup(
                 iterrevisions(objs, progress),
@@ -617,6 +625,8 @@
                 delta,
                 # Flags not yet supported.
                 0,
+                # Sidedata not yet supported.
+                {},
             )
 
             progress.increment()
@@ -715,6 +725,8 @@
                 delta,
                 # Flags not yet supported.
                 0,
+                # Sidedata not yet supported.
+                {},
             )
 
             progress.increment()
--- a/mercurial/filelog.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/filelog.py	Thu Mar 18 18:24:59 2021 -0400
@@ -32,6 +32,8 @@
         # Full name of the user visible file, relative to the repository root.
         # Used by LFS.
         self._revlog.filename = path
+        self._revlog.revlog_kind = b'filelog'
+        self.nullid = self._revlog.nullid
 
     def __len__(self):
         return len(self._revlog)
@@ -102,6 +104,7 @@
         revisiondata=False,
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
+        sidedata_helpers=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -109,6 +112,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
+            sidedata_helpers=sidedata_helpers,
         )
 
     def addrevision(
@@ -176,7 +180,8 @@
     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith(b'\1\n'):
             text = storageutil.packmeta(meta, text)
-        return self.addrevision(text, transaction, link, p1, p2)
+        rev = self.addrevision(text, transaction, link, p1, p2)
+        return self.node(rev)
 
     def renamed(self, node):
         return storageutil.filerevisioncopied(self, node)
--- a/mercurial/filemerge.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/filemerge.py	Thu Mar 18 18:24:59 2021 -0400
@@ -538,6 +538,25 @@
 
 
 @internaltool(
+    b'merge3-lie-about-conflicts',
+    fullmerge,
+    b'',
+    precheck=_mergecheck,
+)
+def _imerge3alwaysgood(*args, **kwargs):
+    # Like merge3, but record conflicts as resolved with markers in place.
+    #
+    # This is used for `diff.merge` to show the differences between
+    # the auto-merge state and the committed merge state. It may be
+    # useful for other things.
+    b1, junk, b2 = _imerge3(*args, **kwargs)
+    # TODO is this right? I'm not sure what these return values mean,
+    # but as far as I can tell this will indicate to callers tha the
+    # merge succeeded.
+    return b1, False, b2
+
+
+@internaltool(
     b'mergediff',
     fullmerge,
     _(
@@ -1195,7 +1214,11 @@
 
 def hasconflictmarkers(data):
     return bool(
-        re.search(b"^(<<<<<<< .*|=======|>>>>>>> .*)$", data, re.MULTILINE)
+        re.search(
+            br"^(<<<<<<<.*|=======.*|------- .*|\+\+\+\+\+\+\+ .*|>>>>>>>.*)$",
+            data,
+            re.MULTILINE,
+        )
     )
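The widened pattern also recognizes the `------- `/`+++++++ ` separators produced by the `mergediff` marker style, in addition to the classic three markers. A quick check of the new behavior, assuming `hasconflictmarkers` is importable:

    classic = b'<<<<<<< local\na\n=======\nb\n>>>>>>> other\n'
    mergediff = b'<<<<<<<\n------- base\n+++++++ local\nx\n=======\ny\n>>>>>>>\n'
    assert hasconflictmarkers(classic)
    assert hasconflictmarkers(mergediff)
    assert not hasconflictmarkers(b'no markers here\n')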
 
 
--- a/mercurial/formatter.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/formatter.py	Thu Mar 18 18:24:59 2021 -0400
@@ -178,6 +178,11 @@
 
 
 class baseformatter(object):
+
+    # set to True if the formatter outputs a strict format that does not
+    # support arbitrary output in the stream.
+    strict_format = False
+
     def __init__(self, ui, topic, opts, converter):
         self._ui = ui
         self._topic = topic
@@ -418,6 +423,9 @@
 
 
 class jsonformatter(baseformatter):
+
+    strict_format = True
+
     def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
         self._out = out
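`strict_format` gives callers a way to detect formatters (currently only JSON) whose output stream cannot absorb arbitrary bytes. A minimal sketch of a command consulting it, assuming a formatter instance `fm`:

    if fm.strict_format:
        # structured stream: stick to machine-readable fields
        fm.data(note=b'extra detail')
    else:
        fm.plain(b'extra detail\n')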
--- a/mercurial/help.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/help.py	Thu Mar 18 18:24:59 2021 -0400
@@ -829,10 +829,11 @@
         def appendcmds(cmds):
             cmds = sorted(cmds)
             for c in cmds:
+                display_cmd = c
                 if ui.verbose:
-                    rst.append(b" :%s: %s\n" % (b', '.join(syns[c]), h[c]))
-                else:
-                    rst.append(b' :%s: %s\n' % (c, h[c]))
+                    display_cmd = b', '.join(syns[c])
+                display_cmd = display_cmd.replace(b':', br'\:')
+                rst.append(b' :%s: %s\n' % (display_cmd, h[c]))
 
         if name in (b'shortlist', b'debug'):
             # List without categories.
--- a/mercurial/hg.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/hg.py	Thu Mar 18 18:24:59 2021 -0400
@@ -681,140 +681,148 @@
         srcpeer = source.peer()  # in case we were called with a localrepo
         branches = (None, branch or [])
         origsource = source = srcpeer.url()
-    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
+    srclock = destlock = cleandir = None
+    destpeer = None
+    try:
+        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
 
-    if dest is None:
-        dest = defaultdest(source)
-        if dest:
-            ui.status(_(b"destination directory: %s\n") % dest)
-    else:
-        dest = ui.expandpath(dest)
+        if dest is None:
+            dest = defaultdest(source)
+            if dest:
+                ui.status(_(b"destination directory: %s\n") % dest)
+        else:
+            dest = ui.expandpath(dest)
 
-    dest = util.urllocalpath(dest)
-    source = util.urllocalpath(source)
+        dest = util.urllocalpath(dest)
+        source = util.urllocalpath(source)
 
-    if not dest:
-        raise error.InputError(_(b"empty destination path is not valid"))
+        if not dest:
+            raise error.InputError(_(b"empty destination path is not valid"))
 
-    destvfs = vfsmod.vfs(dest, expandpath=True)
-    if destvfs.lexists():
-        if not destvfs.isdir():
-            raise error.InputError(_(b"destination '%s' already exists") % dest)
-        elif destvfs.listdir():
-            raise error.InputError(_(b"destination '%s' is not empty") % dest)
+        destvfs = vfsmod.vfs(dest, expandpath=True)
+        if destvfs.lexists():
+            if not destvfs.isdir():
+                raise error.InputError(
+                    _(b"destination '%s' already exists") % dest
+                )
+            elif destvfs.listdir():
+                raise error.InputError(
+                    _(b"destination '%s' is not empty") % dest
+                )
 
-    createopts = {}
-    narrow = False
-
-    if storeincludepats is not None:
-        narrowspec.validatepatterns(storeincludepats)
-        narrow = True
+        createopts = {}
+        narrow = False
 
-    if storeexcludepats is not None:
-        narrowspec.validatepatterns(storeexcludepats)
-        narrow = True
+        if storeincludepats is not None:
+            narrowspec.validatepatterns(storeincludepats)
+            narrow = True
+
+        if storeexcludepats is not None:
+            narrowspec.validatepatterns(storeexcludepats)
+            narrow = True
 
-    if narrow:
-        # Include everything by default if only exclusion patterns defined.
-        if storeexcludepats and not storeincludepats:
-            storeincludepats = {b'path:.'}
+        if narrow:
+            # Include everything by default if only exclusion patterns defined.
+            if storeexcludepats and not storeincludepats:
+                storeincludepats = {b'path:.'}
 
-        createopts[b'narrowfiles'] = True
+            createopts[b'narrowfiles'] = True
 
-    if depth:
-        createopts[b'shallowfilestore'] = True
+        if depth:
+            createopts[b'shallowfilestore'] = True
 
-    if srcpeer.capable(b'lfs-serve'):
-        # Repository creation honors the config if it disabled the extension, so
-        # we can't just announce that lfs will be enabled.  This check avoids
-        # saying that lfs will be enabled, and then saying it's an unknown
-        # feature.  The lfs creation option is set in either case so that a
-        # requirement is added.  If the extension is explicitly disabled but the
-        # requirement is set, the clone aborts early, before transferring any
-        # data.
-        createopts[b'lfs'] = True
+        if srcpeer.capable(b'lfs-serve'):
+            # Repository creation honors the config if it disabled the extension, so
+            # we can't just announce that lfs will be enabled.  This check avoids
+            # saying that lfs will be enabled, and then saying it's an unknown
+            # feature.  The lfs creation option is set in either case so that a
+            # requirement is added.  If the extension is explicitly disabled but the
+            # requirement is set, the clone aborts early, before transferring any
+            # data.
+            createopts[b'lfs'] = True
 
-        if extensions.disabled_help(b'lfs'):
-            ui.status(
-                _(
-                    b'(remote is using large file support (lfs), but it is '
-                    b'explicitly disabled in the local configuration)\n'
+            if extensions.disabled_help(b'lfs'):
+                ui.status(
+                    _(
+                        b'(remote is using large file support (lfs), but it is '
+                        b'explicitly disabled in the local configuration)\n'
+                    )
                 )
-            )
-        else:
-            ui.status(
-                _(
-                    b'(remote is using large file support (lfs); lfs will '
-                    b'be enabled for this repository)\n'
+            else:
+                ui.status(
+                    _(
+                        b'(remote is using large file support (lfs); lfs will '
+                        b'be enabled for this repository)\n'
+                    )
                 )
-            )
 
-    shareopts = shareopts or {}
-    sharepool = shareopts.get(b'pool')
-    sharenamemode = shareopts.get(b'mode')
-    if sharepool and islocal(dest):
-        sharepath = None
-        if sharenamemode == b'identity':
-            # Resolve the name from the initial changeset in the remote
-            # repository. This returns nullid when the remote is empty. It
-            # raises RepoLookupError if revision 0 is filtered or otherwise
-            # not available. If we fail to resolve, sharing is not enabled.
-            try:
-                with srcpeer.commandexecutor() as e:
-                    rootnode = e.callcommand(
-                        b'lookup',
-                        {
-                            b'key': b'0',
-                        },
-                    ).result()
+        shareopts = shareopts or {}
+        sharepool = shareopts.get(b'pool')
+        sharenamemode = shareopts.get(b'mode')
+        if sharepool and islocal(dest):
+            sharepath = None
+            if sharenamemode == b'identity':
+                # Resolve the name from the initial changeset in the remote
+                # repository. This returns nullid when the remote is empty. It
+                # raises RepoLookupError if revision 0 is filtered or otherwise
+                # not available. If we fail to resolve, sharing is not enabled.
+                try:
+                    with srcpeer.commandexecutor() as e:
+                        rootnode = e.callcommand(
+                            b'lookup',
+                            {
+                                b'key': b'0',
+                            },
+                        ).result()
 
-                if rootnode != nullid:
-                    sharepath = os.path.join(sharepool, hex(rootnode))
-                else:
+                    if rootnode != nullid:
+                        sharepath = os.path.join(sharepool, hex(rootnode))
+                    else:
+                        ui.status(
+                            _(
+                                b'(not using pooled storage: '
+                                b'remote appears to be empty)\n'
+                            )
+                        )
+                except error.RepoLookupError:
                     ui.status(
                         _(
                             b'(not using pooled storage: '
-                            b'remote appears to be empty)\n'
+                            b'unable to resolve identity of remote)\n'
                         )
                     )
-            except error.RepoLookupError:
-                ui.status(
-                    _(
-                        b'(not using pooled storage: '
-                        b'unable to resolve identity of remote)\n'
-                    )
+            elif sharenamemode == b'remote':
+                sharepath = os.path.join(
+                    sharepool, hex(hashutil.sha1(source).digest())
+                )
+            else:
+                raise error.Abort(
+                    _(b'unknown share naming mode: %s') % sharenamemode
                 )
-        elif sharenamemode == b'remote':
-            sharepath = os.path.join(
-                sharepool, hex(hashutil.sha1(source).digest())
-            )
-        else:
-            raise error.Abort(
-                _(b'unknown share naming mode: %s') % sharenamemode
-            )
+
+            # TODO this is a somewhat arbitrary restriction.
+            if narrow:
+                ui.status(
+                    _(b'(pooled storage not supported for narrow clones)\n')
+                )
+                sharepath = None
 
-        # TODO this is a somewhat arbitrary restriction.
-        if narrow:
-            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
-            sharepath = None
+            if sharepath:
+                return clonewithshare(
+                    ui,
+                    peeropts,
+                    sharepath,
+                    source,
+                    srcpeer,
+                    dest,
+                    pull=pull,
+                    rev=revs,
+                    update=update,
+                    stream=stream,
+                )
 
-        if sharepath:
-            return clonewithshare(
-                ui,
-                peeropts,
-                sharepath,
-                source,
-                srcpeer,
-                dest,
-                pull=pull,
-                rev=revs,
-                update=update,
-                stream=stream,
-            )
+        srcrepo = srcpeer.local()
 
-    srclock = destlock = cleandir = None
-    srcrepo = srcpeer.local()
-    try:
         abspath = origsource
         if islocal(origsource):
             abspath = os.path.abspath(util.urllocalpath(origsource))
@@ -1055,6 +1063,8 @@
             shutil.rmtree(cleandir, True)
         if srcpeer is not None:
             srcpeer.close()
+        if destpeer and destpeer.local() is None:
+            destpeer.close()
     return srcpeer, destpeer
 
 
@@ -1114,6 +1124,7 @@
     assert stats.unresolvedcount == 0
     if show_stats:
         _showstats(repo, stats, quietempty)
+    return False
 
 
 # naming conflict in updatetotally()
@@ -1256,15 +1267,17 @@
     """
     source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
     other = peer(repo, opts, source)
-    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
-    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
+    cleanupfn = other.close
+    try:
+        ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
+        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
 
-    if revs:
-        revs = [other.lookup(rev) for rev in revs]
-    other, chlist, cleanupfn = bundlerepo.getremotechanges(
-        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
-    )
-    try:
+        if revs:
+            revs = [other.lookup(rev) for rev in revs]
+        other, chlist, cleanupfn = bundlerepo.getremotechanges(
+            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+        )
+
         if not chlist:
             ui.status(_(b"no changes found\n"))
             return subreporecurse()
@@ -1308,7 +1321,7 @@
 
 
 def _outgoing(ui, repo, dest, opts):
-    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
+    path = ui.getpath(dest, default=(b'default-push', b'default'))
     if not path:
         raise error.Abort(
             _(b'default repository not configured!'),
@@ -1323,13 +1336,17 @@
         revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
 
     other = peer(repo, opts, dest)
-    outgoing = discovery.findcommonoutgoing(
-        repo, other, revs, force=opts.get(b'force')
-    )
-    o = outgoing.missing
-    if not o:
-        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
-    return o, other
+    try:
+        outgoing = discovery.findcommonoutgoing(
+            repo, other, revs, force=opts.get(b'force')
+        )
+        o = outgoing.missing
+        if not o:
+            scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
+        return o, other
+    except:  # re-raises
+        other.close()
+        raise
 
 
 def outgoing(ui, repo, dest, opts):
@@ -1344,27 +1361,30 @@
 
     limit = logcmdutil.getlimit(opts)
     o, other = _outgoing(ui, repo, dest, opts)
-    if not o:
-        cmdutil.outgoinghooks(ui, repo, other, opts, o)
-        return recurse()
+    try:
+        if not o:
+            cmdutil.outgoinghooks(ui, repo, other, opts, o)
+            return recurse()
 
-    if opts.get(b'newest_first'):
-        o.reverse()
-    ui.pager(b'outgoing')
-    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
-    count = 0
-    for n in o:
-        if limit is not None and count >= limit:
-            break
-        parents = [p for p in repo.changelog.parents(n) if p != nullid]
-        if opts.get(b'no_merges') and len(parents) == 2:
-            continue
-        count += 1
-        displayer.show(repo[n])
-    displayer.close()
-    cmdutil.outgoinghooks(ui, repo, other, opts, o)
-    recurse()
-    return 0  # exit code is zero since we found outgoing changes
+        if opts.get(b'newest_first'):
+            o.reverse()
+        ui.pager(b'outgoing')
+        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+        count = 0
+        for n in o:
+            if limit is not None and count >= limit:
+                break
+            parents = [p for p in repo.changelog.parents(n) if p != nullid]
+            if opts.get(b'no_merges') and len(parents) == 2:
+                continue
+            count += 1
+            displayer.show(repo[n])
+        displayer.close()
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
+        recurse()
+        return 0  # exit code is zero since we found outgoing changes
+    finally:
+        other.close()
 
 
 def verify(repo, level=None):
--- a/mercurial/httppeer.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/httppeer.py	Thu Mar 18 18:24:59 2021 -0400
@@ -171,9 +171,9 @@
         # Send arguments via HTTP headers.
         if headersize > 0:
             # The headers can typically carry more data than the URL.
-            encargs = urlreq.urlencode(sorted(args.items()))
+            encoded_args = urlreq.urlencode(sorted(args.items()))
             for header, value in encodevalueinheaders(
-                encargs, b'X-HgArg', headersize
+                encoded_args, b'X-HgArg', headersize
             ):
                 headers[header] = value
         # Send arguments via query string (Mercurial <1.9).
--- a/mercurial/i18n.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/i18n.py	Thu Mar 18 18:24:59 2021 -0400
@@ -19,6 +19,14 @@
     pycompat,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Callable,
+        List,
+        Optional,
+    )
+
+
 # modelled after templater.templatepath:
 if getattr(sys, 'frozen', None) is not None:
     module = pycompat.sysexecutable
@@ -40,7 +48,10 @@
     try:
         import ctypes
 
+        # pytype: disable=module-attr
         langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
+        # pytype: enable=module-attr
+
         _languages = [locale.windows_locale[langid]]
     except (ImportError, AttributeError, KeyError):
         # ctypes not found or unknown langid
@@ -51,7 +62,7 @@
 localedir = os.path.join(datapath, 'locale')
 t = gettextmod.translation('hg', localedir, _languages, fallback=True)
 try:
-    _ugettext = t.ugettext
+    _ugettext = t.ugettext  # pytype: disable=attribute-error
 except AttributeError:
     _ugettext = t.gettext
 
@@ -60,6 +71,7 @@
 
 
 def gettext(message):
+    # type: (Optional[bytes]) -> Optional[bytes]
     """Translate message.
 
     The message is looked up in the catalog to get a Unicode string,
@@ -77,7 +89,7 @@
     if message not in cache:
         if type(message) is pycompat.unicode:
             # goofy unicode docstrings in test
-            paragraphs = message.split(u'\n\n')
+            paragraphs = message.split(u'\n\n')  # type: List[pycompat.unicode]
         else:
             # should be ascii, but we have unicode docstrings in test, which
             # are converted to utf-8 bytes on Python 3.
@@ -110,6 +122,6 @@
 
 
 if _plain():
-    _ = lambda message: message
+    _ = lambda message: message  # type: Callable[[bytes], bytes]
 else:
     _ = gettext
--- a/mercurial/interfaces/dirstate.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/interfaces/dirstate.py	Thu Mar 18 18:24:59 2021 -0400
@@ -8,7 +8,7 @@
 
 
 class idirstate(interfaceutil.Interface):
-    def __init__(opener, ui, root, validate, sparsematchfn):
+    def __init__(opener, ui, root, validate, sparsematchfn, nodeconstants):
         """Create a new dirstate object.
 
         opener is an open()-like callable that can be used to open the
--- a/mercurial/interfaces/repository.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/interfaces/repository.py	Thu Mar 18 18:24:59 2021 -0400
@@ -453,6 +453,10 @@
         """
     )
 
+    sidedata = interfaceutil.Attribute(
+        """Raw sidedata bytes for the given revision."""
+    )
+
 
 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.
@@ -519,6 +523,10 @@
     * Metadata to facilitate storage.
     """
 
+    nullid = interfaceutil.Attribute(
+        """node for the null revision for use as delta base."""
+    )
+
     def __len__():
         """Obtain the number of revisions stored for this file."""
 
@@ -734,7 +742,7 @@
         flags=0,
         cachedelta=None,
     ):
-        """Add a new revision to the store.
+        """Add a new revision to the store and return its number.
 
         This is similar to ``add()`` except it operates at a lower level.
 
@@ -769,7 +777,14 @@
         ``nullid``, in which case the header from the delta can be ignored
         and the delta used as the fulltext.
 
-        ``addrevisioncb`` should be called for each node as it is committed.
+        ``alwayscache`` instructs the lower layers to cache the content of the
+        newly added revision, even if it needs to be explicitly computed.
+        Up to Mercurial 5.8, this was the default behavior whenever
+        ``addrevisioncb`` was provided.
+
+        ``addrevisioncb`` should be called for each new rev as it is committed.
+        ``duplicaterevisioncb`` should be called for all revs with a
+        pre-existing node.
 
         ``maybemissingparents`` is a bool indicating whether the incoming
         data may reference parents/ancestor revisions that aren't present.
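Putting the pieces of this hunk together: the callbacks now receive a revision number rather than a node, and `alwayscache=True` restores the caching guarantee that was previously implicit. A caller sketch, with `deltas`, `linkmapper` and `tr` assumed to exist:

    added = []

    def on_changeset(store, rev):
        added.append(store.node(rev))  # map the rev back to its node

    store.addgroup(
        deltas,
        linkmapper,
        tr,
        alwayscache=True,                  # keep fulltexts for the callbacks
        addrevisioncb=on_changeset,        # fires once per newly added rev
        duplicaterevisioncb=on_changeset,  # fires for revs we already had
    )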
@@ -1132,6 +1147,10 @@
 class imanifeststorage(interfaceutil.Interface):
     """Storage interface for manifest data."""
 
+    nodeconstants = interfaceutil.Attribute(
+        """nodeconstants used by the current repository."""
+    )
+
     tree = interfaceutil.Attribute(
         """The path to the directory this manifest tracks.
 
@@ -1355,6 +1374,10 @@
     tree manifests.
     """
 
+    nodeconstants = interfaceutil.Attribute(
+        """nodeconstants used by the current repository."""
+    )
+
     def __getitem__(node):
         """Obtain a manifest instance for a given binary node.
 
@@ -1423,6 +1446,13 @@
     This currently captures the reality of things - not how things should be.
     """
 
+    nodeconstants = interfaceutil.Attribute(
+        """Constant nodes matching the hash function used by the repository."""
+    )
+    nullid = interfaceutil.Attribute(
+        """null revision for the hash function used by the repository."""
+    )
+
     supportedformats = interfaceutil.Attribute(
         """Set of requirements that apply to stream clone.
 
@@ -1641,6 +1671,14 @@
     def revbranchcache():
         pass
 
+    def register_changeset(rev, changelogrevision):
+        """Extension point for caches for new nodes.
+
+        Multiple consumers are expected to need parts of the changelogrevision,
+        so it is provided as an optimization to avoid duplicate lookups. A
+        simple cache would be fragile when other revisions are accessed, too."""
+        pass
+
     def branchtip(branchtip, ignoremissing=False):
         """Return the tip node for a given branch."""
 
@@ -1813,6 +1851,12 @@
     def savecommitmessage(text):
         pass
 
+    def register_sidedata_computer(kind, category, keys, computer):
+        pass
+
+    def register_wanted_sidedata(category):
+        pass
+
 
 class completelocalrepository(
     ilocalrepositorymain, ilocalrepositoryfilestorage
--- a/mercurial/localrepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/localrepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -21,6 +21,7 @@
     hex,
     nullid,
     nullrev,
+    sha1nodeconstants,
     short,
 )
 from .pycompat import (
@@ -49,6 +50,7 @@
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
+    metadata as metadatamod,
     namespaces,
     narrowspec,
     obsolete,
@@ -84,7 +86,10 @@
     stringutil,
 )
 
-from .revlogutils import constants as revlogconst
+from .revlogutils import (
+    concurrency_checker as revlogchecker,
+    constants as revlogconst,
+)
 
 release = lockmod.release
 urlerr = util.urlerr
@@ -270,6 +275,11 @@
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui
+
+        if repo._wanted_sidedata:
+            formatted = bundle2.format_remote_wanted_sidedata(repo)
+            caps.add(b'exp-wanted-sidedata=' + formatted)
+
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
@@ -313,7 +323,13 @@
         )
 
     def getbundle(
-        self, source, heads=None, common=None, bundlecaps=None, **kwargs
+        self,
+        source,
+        heads=None,
+        common=None,
+        bundlecaps=None,
+        remote_sidedata=None,
+        **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
@@ -321,6 +337,7 @@
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
+            remote_sidedata=remote_sidedata,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)
@@ -939,11 +956,10 @@
 
 def makestore(requirements, path, vfstype):
     """Construct a storage object for a repository."""
-    if b'store' in requirements:
-        if b'fncache' in requirements:
-            return storemod.fncachestore(
-                path, vfstype, b'dotencode' in requirements
-            )
+    if requirementsmod.STORE_REQUIREMENT in requirements:
+        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
+            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
+            return storemod.fncachestore(path, vfstype, dotencode)
 
         return storemod.encodedstore(path, vfstype)
 
@@ -971,7 +987,7 @@
     # opener options for it because those options wouldn't do anything
     # meaningful on such old repos.
     if (
-        b'revlogv1' in requirements
+        requirementsmod.REVLOGV1_REQUIREMENT in requirements
         or requirementsmod.REVLOGV2_REQUIREMENT in requirements
     ):
         options.update(resolverevlogstorevfsoptions(ui, requirements, features))
@@ -995,12 +1011,12 @@
     options = {}
     options[b'flagprocessors'] = {}
 
-    if b'revlogv1' in requirements:
+    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
         options[b'revlogv1'] = True
     if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
         options[b'revlogv2'] = True
 
-    if b'generaldelta' in requirements:
+    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
         options[b'generaldelta'] = True
 
     # experimental config: format.chunkcachesize
@@ -1196,8 +1212,8 @@
     #    being successful (repository sizes went up due to worse delta
     #    chains), and the code was deleted in 4.6.
     supportedformats = {
-        b'revlogv1',
-        b'generaldelta',
+        requirementsmod.REVLOGV1_REQUIREMENT,
+        requirementsmod.GENERALDELTA_REQUIREMENT,
         requirementsmod.TREEMANIFEST_REQUIREMENT,
         requirementsmod.COPIESSDC_REQUIREMENT,
         requirementsmod.REVLOGV2_REQUIREMENT,
@@ -1208,11 +1224,11 @@
         requirementsmod.SHARESAFE_REQUIREMENT,
     }
     _basesupported = supportedformats | {
-        b'store',
-        b'fncache',
+        requirementsmod.STORE_REQUIREMENT,
+        requirementsmod.FNCACHE_REQUIREMENT,
         requirementsmod.SHARED_REQUIREMENT,
         requirementsmod.RELATIVE_SHARED_REQUIREMENT,
-        b'dotencode',
+        requirementsmod.DOTENCODE_REQUIREMENT,
         requirementsmod.SPARSE_REQUIREMENT,
         requirementsmod.INTERNAL_PHASE_REQUIREMENT,
     }
@@ -1315,6 +1331,8 @@
         self.vfs = hgvfs
         self.path = hgvfs.base
         self.requirements = requirements
+        self.nodeconstants = sha1nodeconstants
+        self.nullid = self.nodeconstants.nullid
         self.supported = supportedrequirements
         self.sharedpath = sharedpath
         self.store = store
@@ -1386,6 +1404,10 @@
         if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
             self.filecopiesmode = b'changeset-sidedata'
 
+        self._wanted_sidedata = set()
+        self._sidedata_computers = {}
+        metadatamod.set_sidedata_spec_for_repo(self)
+
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
@@ -1639,7 +1661,10 @@
     def changelog(self):
         # load dirstate before changelog to avoid race see issue6303
         self.dirstate.prefetch_parents()
-        return self.store.changelog(txnutil.mayhavepending(self.root))
+        return self.store.changelog(
+            txnutil.mayhavepending(self.root),
+            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
+        )
 
     @storecache(b'00manifest.i')
     def manifestlog(self):
@@ -1654,7 +1679,12 @@
         sparsematchfn = lambda: sparse.matcher(self)
 
         return dirstate.dirstate(
-            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
+            self.vfs,
+            self.ui,
+            self.root,
+            self._dirstatevalidate,
+            sparsematchfn,
+            self.nodeconstants,
         )
 
     def _dirstatevalidate(self, node):
@@ -2059,6 +2089,9 @@
             self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
         return self._revbranchcache
 
+    def register_changeset(self, rev, changelogrevision):
+        self.revbranchcache().setdata(rev, changelogrevision)
+
     def branchtip(self, branch, ignoremissing=False):
         """return the tip node for a given branch
 
@@ -3326,6 +3359,22 @@
             fp.close()
         return self.pathto(fp.name[len(self.root) + 1 :])
 
+    def register_wanted_sidedata(self, category):
+        self._wanted_sidedata.add(pycompat.bytestr(category))
+
+    def register_sidedata_computer(self, kind, category, keys, computer):
+        if kind not in (b"changelog", b"manifest", b"filelog"):
+            msg = _(b"unexpected revlog kind '%s'.")
+            raise error.ProgrammingError(msg % kind)
+        category = pycompat.bytestr(category)
+        if category in self._sidedata_computers.get(kind, []):
+            msg = _(
+                b"cannot register a sidedata computer twice for category '%s'."
+            )
+            raise error.ProgrammingError(msg % category)
+        self._sidedata_computers.setdefault(kind, {})
+        self._sidedata_computers[kind][category] = (keys, computer)
+
 
 # used to avoid circular references so destructors work
 def aftertrans(files):
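A registration sketch for the two new hooks; the category name is hypothetical, and the computer's exact signature is dictated by the sidedata framework, so it is deliberately left loose here:

    def compute_example(*args, **kwargs):
        # a real computer derives sidedata values for one revision;
        # signature intentionally loose in this sketch
        return {}

    repo.register_wanted_sidedata(b'exp-example')
    repo.register_sidedata_computer(
        b'changelog',       # one of b'changelog', b'manifest', b'filelog'
        b'exp-example',     # category; double registration raises
        (b'exp-example',),  # keys this computer produces
        compute_example,
    )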
@@ -3410,13 +3459,13 @@
             % createopts[b'backend']
         )
 
-    requirements = {b'revlogv1'}
+    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
     if ui.configbool(b'format', b'usestore'):
-        requirements.add(b'store')
+        requirements.add(requirementsmod.STORE_REQUIREMENT)
         if ui.configbool(b'format', b'usefncache'):
-            requirements.add(b'fncache')
+            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
             if ui.configbool(b'format', b'dotencode'):
-                requirements.add(b'dotencode')
+                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
 
     compengines = ui.configlist(b'format', b'revlog-compression')
     for compengine in compengines:
@@ -3442,15 +3491,19 @@
         requirements.add(b'exp-compression-%s' % compengine)
 
     if scmutil.gdinitconfig(ui):
-        requirements.add(b'generaldelta')
+        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
         if ui.configbool(b'format', b'sparse-revlog'):
             requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
 
     # experimental config: format.exp-use-side-data
     if ui.configbool(b'format', b'exp-use-side-data'):
+        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
+        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
         requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
     # experimental config: format.exp-use-copies-side-data-changeset
     if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
+        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
+        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
         requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
         requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
     if ui.configbool(b'experimental', b'treemanifest'):
@@ -3458,9 +3511,9 @@
 
     revlogv2 = ui.config(b'experimental', b'revlogv2')
     if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
-        requirements.remove(b'revlogv1')
+        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
         # generaldelta is implied by revlogv2.
-        requirements.discard(b'generaldelta')
+        requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
         requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
     # experimental config: format.internal-phase
     if ui.configbool(b'format', b'internal-phase'):
@@ -3494,7 +3547,7 @@
 
     dropped = set()
 
-    if b'store' not in requirements:
+    if requirementsmod.STORE_REQUIREMENT not in requirements:
         if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
             ui.warn(
                 _(
@@ -3633,7 +3686,8 @@
         hgvfs.mkdir(b'cache')
     hgvfs.mkdir(b'wcache')
 
-    if b'store' in requirements and b'sharedrepo' not in createopts:
+    has_store = requirementsmod.STORE_REQUIREMENT in requirements
+    if has_store and b'sharedrepo' not in createopts:
         hgvfs.mkdir(b'store')
 
         # We create an invalid changelog outside the store so very old
@@ -3642,11 +3696,11 @@
         # effectively locks out old clients and prevents them from
         # mucking with a repo in an unknown format.
         #
-        # The revlog header has version 2, which won't be recognized by
+        # The revlog header has version 65535, which won't be recognized by
         # such old clients.
         hgvfs.append(
             b'00changelog.i',
-            b'\0\0\0\2 dummy changelog to prevent using the old repo '
+            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
             b'layout',
         )
 
--- a/mercurial/logcmdutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/logcmdutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -27,6 +27,7 @@
     graphmod,
     match as matchmod,
     mdiff,
+    merge,
     patch,
     pathutil,
     pycompat,
@@ -74,6 +75,36 @@
     return limit
 
 
+def diff_parent(ctx):
+    """get the context object to use as parent when diffing
+
+    If diff.merge is enabled, an overlayworkingctx of the auto-merged
+    parents will be returned.
+    """
+    repo = ctx.repo()
+    if repo.ui.configbool(b"diff", b"merge") and ctx.p2().node() != nullid:
+        # avoid cycle context -> subrepo -> cmdutil -> logcmdutil
+        from . import context
+
+        wctx = context.overlayworkingctx(repo)
+        wctx.setbase(ctx.p1())
+        with repo.ui.configoverride(
+            {
+                (
+                    b"ui",
+                    b"forcemerge",
+                ): b"internal:merge3-lie-about-conflicts",
+            },
+            b"merge-diff",
+        ):
+            repo.ui.pushbuffer()
+            merge.merge(ctx.p2(), wc=wctx)
+            repo.ui.popbuffer()
+        return wctx
+    else:
+        return ctx.p1()
+
+
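+# With the experimental diff.merge option enabled, `hg log -p` on a merge
+# changeset diffs against this auto-merged overlay instead of the first
+# parent, so only conflict resolutions and manual edits show up; the
+# merge3-lie-about-conflicts tool above keeps the markers in place without
+# reporting failure. Enabling it in hgrc looks like:
+#
+#   [diff]
+#   merge = yes
+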
 def diffordiffstat(
     ui,
     repo,
@@ -217,7 +248,7 @@
             ui,
             ctx.repo(),
             diffopts,
-            ctx.p1(),
+            diff_parent(ctx),
             ctx,
             match=self._makefilematcher(ctx),
             stat=stat,
--- a/mercurial/manifest.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/manifest.py	Thu Mar 18 18:24:59 2021 -0400
@@ -792,8 +792,9 @@
 
 @interfaceutil.implementer(repository.imanifestdict)
 class treemanifest(object):
-    def __init__(self, dir=b'', text=b''):
+    def __init__(self, nodeconstants, dir=b'', text=b''):
         self._dir = dir
+        self.nodeconstants = nodeconstants
         self._node = nullid
         self._loadfunc = _noop
         self._copyfunc = _noop
@@ -1051,7 +1052,9 @@
         if dir:
             self._loadlazy(dir)
             if dir not in self._dirs:
-                self._dirs[dir] = treemanifest(self._subpath(dir))
+                self._dirs[dir] = treemanifest(
+                    self.nodeconstants, self._subpath(dir)
+                )
             self._dirs[dir].__setitem__(subpath, n)
         else:
             # manifest nodes are either 20 bytes or 32 bytes,
@@ -1078,14 +1081,16 @@
         if dir:
             self._loadlazy(dir)
             if dir not in self._dirs:
-                self._dirs[dir] = treemanifest(self._subpath(dir))
+                self._dirs[dir] = treemanifest(
+                    self.nodeconstants, self._subpath(dir)
+                )
             self._dirs[dir].setflag(subpath, flags)
         else:
             self._flags[f] = flags
         self._dirty = True
 
     def copy(self):
-        copy = treemanifest(self._dir)
+        copy = treemanifest(self.nodeconstants, self._dir)
         copy._node = self._node
         copy._dirty = self._dirty
         if self._copyfunc is _noop:
@@ -1215,7 +1220,7 @@
         visit = match.visitchildrenset(self._dir[:-1])
         if visit == b'all':
             return self.copy()
-        ret = treemanifest(self._dir)
+        ret = treemanifest(self.nodeconstants, self._dir)
         if not visit:
             return ret
 
@@ -1272,7 +1277,7 @@
             m2 = m2._matches(match)
             return m1.diff(m2, clean=clean)
         result = {}
-        emptytree = treemanifest()
+        emptytree = treemanifest(self.nodeconstants)
 
         def _iterativediff(t1, t2, stack):
             """compares two tree manifests and append new tree-manifests which
@@ -1368,7 +1373,7 @@
         self._load()  # for consistency; should never have any effect here
         m1._load()
         m2._load()
-        emptytree = treemanifest()
+        emptytree = treemanifest(self.nodeconstants)
 
         def getnode(m, d):
             ld = m._lazydirs.get(d)
@@ -1551,6 +1556,7 @@
 
     def __init__(
         self,
+        nodeconstants,
         opener,
         tree=b'',
         dirlogcache=None,
@@ -1567,6 +1573,7 @@
         option takes precedence, so if it is set to True, we ignore whatever
         value is passed in to the constructor.
         """
+        self.nodeconstants = nodeconstants
         # During normal operations, we expect to deal with not more than four
         # revs at a time (such as during commit --amend). When rebasing large
         # stacks of commits, the number can go up, hence the config knob below.
@@ -1610,6 +1617,7 @@
         self.index = self._revlog.index
         self.version = self._revlog.version
         self._generaldelta = self._revlog._generaldelta
+        self._revlog.revlog_kind = b'manifest'
 
     def _setupmanifestcachehooks(self, repo):
         """Persist the manifestfulltextcache on lock release"""
@@ -1653,7 +1661,11 @@
             assert self._treeondisk
         if d not in self._dirlogcache:
             mfrevlog = manifestrevlog(
-                self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
+                self.nodeconstants,
+                self.opener,
+                d,
+                self._dirlogcache,
+                treemanifest=self._treeondisk,
             )
             self._dirlogcache[d] = mfrevlog
         return self._dirlogcache[d]
@@ -1704,9 +1716,10 @@
             arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
             cachedelta = self._revlog.rev(p1), deltatext
             text = util.buffer(arraytext)
-            n = self._revlog.addrevision(
+            rev = self._revlog.addrevision(
                 text, transaction, link, p1, p2, cachedelta
             )
+            n = self._revlog.node(rev)
         except FastdeltaUnavailable:
             # The first parent manifest isn't already loaded or the
             # manifest implementation doesn't support fastdelta, so
@@ -1724,7 +1737,8 @@
                 arraytext = None
             else:
                 text = m.text()
-                n = self._revlog.addrevision(text, transaction, link, p1, p2)
+                rev = self._revlog.addrevision(text, transaction, link, p1, p2)
+                n = self._revlog.node(rev)
                 arraytext = bytearray(text)
 
         if arraytext is not None:
@@ -1765,9 +1779,10 @@
                 n = m2.node()
 
         if not n:
-            n = self._revlog.addrevision(
+            rev = self._revlog.addrevision(
                 text, transaction, link, m1.node(), m2.node()
             )
+            n = self._revlog.node(rev)
 
         # Save nodeid so parent manifest can calculate its nodeid
         m.setnode(n)
@@ -1822,6 +1837,7 @@
         revisiondata=False,
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
+        sidedata_helpers=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -1829,6 +1845,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
+            sidedata_helpers=sidedata_helpers,
         )
 
     def addgroup(
@@ -1836,6 +1853,7 @@
         deltas,
         linkmapper,
         transaction,
+        alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
     ):
@@ -1843,6 +1861,7 @@
             deltas,
             linkmapper,
             transaction,
+            alwayscache=alwayscache,
             addrevisioncb=addrevisioncb,
             duplicaterevisioncb=duplicaterevisioncb,
         )
@@ -1909,6 +1928,7 @@
     they receive (i.e. tree or flat or lazily loaded, etc)."""
 
     def __init__(self, opener, repo, rootstore, narrowmatch):
+        self.nodeconstants = repo.nodeconstants
         usetreemanifest = False
         cachesize = 4
 
@@ -1947,7 +1967,7 @@
 
         if not self._narrowmatch.always():
             if not self._narrowmatch.visitdir(tree[:-1]):
-                return excludeddirmanifestctx(tree, node)
+                return excludeddirmanifestctx(self.nodeconstants, tree, node)
         if tree:
             if self._rootstore._treeondisk:
                 if verify:
@@ -2110,7 +2130,7 @@
     def __init__(self, manifestlog, dir=b''):
         self._manifestlog = manifestlog
         self._dir = dir
-        self._treemanifest = treemanifest()
+        self._treemanifest = treemanifest(manifestlog.nodeconstants)
 
     def _storage(self):
         return self._manifestlog.getstorage(b'')
@@ -2160,17 +2180,19 @@
         narrowmatch = self._manifestlog._narrowmatch
         if not narrowmatch.always():
             if not narrowmatch.visitdir(self._dir[:-1]):
-                return excludedmanifestrevlog(self._dir)
+                return excludedmanifestrevlog(
+                    self._manifestlog.nodeconstants, self._dir
+                )
         return self._manifestlog.getstorage(self._dir)
 
     def read(self):
         if self._data is None:
             store = self._storage()
             if self._node == nullid:
-                self._data = treemanifest()
+                self._data = treemanifest(self._manifestlog.nodeconstants)
             # TODO accessing non-public API
             elif store._treeondisk:
-                m = treemanifest(dir=self._dir)
+                m = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
 
                 def gettext():
                     return store.revision(self._node)
@@ -2190,7 +2212,9 @@
                     text = store.revision(self._node)
                     arraytext = bytearray(text)
                     store.fulltextcache[self._node] = arraytext
-                self._data = treemanifest(dir=self._dir, text=text)
+                self._data = treemanifest(
+                    self._manifestlog.nodeconstants, dir=self._dir, text=text
+                )
 
         return self._data
 
@@ -2227,7 +2251,7 @@
             r0 = store.deltaparent(store.rev(self._node))
             m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
             m1 = self.read()
-            md = treemanifest(dir=self._dir)
+            md = treemanifest(self._manifestlog.nodeconstants, dir=self._dir)
             for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                 if n1:
                     md[f] = n1
@@ -2270,8 +2294,8 @@
     whose contents are unknown.
     """
 
-    def __init__(self, dir, node):
-        super(excludeddir, self).__init__(dir)
+    def __init__(self, nodeconstants, dir, node):
+        super(excludeddir, self).__init__(nodeconstants, dir)
         self._node = node
         # Add an empty file, which will be included by iterators and such,
         # appearing as the directory itself (i.e. something like "dir/")
@@ -2290,12 +2314,13 @@
 class excludeddirmanifestctx(treemanifestctx):
     """context wrapper for excludeddir - see that docstring for rationale"""
 
-    def __init__(self, dir, node):
+    def __init__(self, nodeconstants, dir, node):
+        self.nodeconstants = nodeconstants
         self._dir = dir
         self._node = node
 
     def read(self):
-        return excludeddir(self._dir, self._node)
+        return excludeddir(self.nodeconstants, self._dir, self._node)
 
     def readfast(self, shallow=False):
         # special version of readfast since we don't have underlying storage
@@ -2317,7 +2342,8 @@
     outside the narrowspec.
     """
 
-    def __init__(self, dir):
+    def __init__(self, nodeconstants, dir):
+        self.nodeconstants = nodeconstants
         self._dir = dir
 
     def __len__(self):
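
Every treemanifest constructor in this file now takes the node constants as
its first positional argument. A minimal sketch of the new calling convention,
assuming sha1nodeconstants from mercurial.node (introduced later in this
patch) is the object being threaded through:

    from mercurial.manifest import treemanifest
    from mercurial.node import sha1nodeconstants

    # nodeconstants is now mandatory; dir and text keep their defaults
    root = treemanifest(sha1nodeconstants)
    sub = treemanifest(sha1nodeconstants, dir=b'lib/')
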
--- a/mercurial/merge.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/merge.py	Thu Mar 18 18:24:59 2021 -0400
@@ -234,7 +234,7 @@
         else:
             warn(_(b"%s: untracked file differs\n") % f)
     if abortconflicts:
-        raise error.Abort(
+        raise error.StateError(
             _(
                 b"untracked files in working directory "
                 b"differ from files in requested revision"
@@ -342,7 +342,7 @@
     for f in pmmf:
         fold = util.normcase(f)
         if fold in foldmap:
-            raise error.Abort(
+            raise error.StateError(
                 _(b"case-folding collision between %s and %s")
                 % (f, foldmap[fold])
             )
@@ -353,7 +353,7 @@
     for fold, f in sorted(foldmap.items()):
         if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
             # the folded prefix matches but actual casing is different
-            raise error.Abort(
+            raise error.StateError(
                 _(b"case-folding collision between %s and directory of %s")
                 % (lastfull, f)
             )
@@ -505,7 +505,9 @@
     if invalidconflicts:
         for p in invalidconflicts:
             repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
-        raise error.Abort(_(b"destination manifest contains path conflicts"))
+        raise error.StateError(
+            _(b"destination manifest contains path conflicts")
+        )
 
 
 def _filternarrowactions(narrowmatch, branchmerge, mresult):
@@ -1919,10 +1921,10 @@
         ### check phase
         if not overwrite:
             if len(pl) > 1:
-                raise error.Abort(_(b"outstanding uncommitted merge"))
+                raise error.StateError(_(b"outstanding uncommitted merge"))
             ms = wc.mergestate()
-            if list(ms.unresolved()):
-                raise error.Abort(
+            if ms.unresolvedcount():
+                raise error.StateError(
                     _(b"outstanding merge conflicts"),
                     hint=_(b"use 'hg resolve' to resolve"),
                 )
@@ -2008,7 +2010,7 @@
             if mresult.hasconflicts():
                 msg = _(b"conflicting changes")
                 hint = _(b"commit or update --clean to discard changes")
-                raise error.Abort(msg, hint=hint)
+                raise error.StateError(msg, hint=hint)
 
         # Prompt and create actions. Most of this is in the resolve phase
         # already, but we can't handle .hgsubstate in filemerge or
@@ -2325,6 +2327,7 @@
     removefiles=True,
     abortonerror=False,
     noop=False,
+    confirm=False,
 ):
     """Purge the working directory of untracked files.
 
@@ -2345,6 +2348,8 @@
     ``noop`` controls whether to actually remove files. If not defined, actions
     will be taken.
 
+    ``confirm`` asks for confirmation before actually removing anything.
+
     Returns an iterable of relative paths in the working directory that were
     or would be removed.
     """
@@ -2372,6 +2377,35 @@
 
         status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
 
+        if confirm:
+            nb_ignored = len(status.ignored)
+            nb_unknown = len(status.unknown)
+            if nb_unknown and nb_ignored:
+                msg = _(b"permanently delete %d unknown and %d ignored files?")
+                msg %= (nb_unknown, nb_ignored)
+            elif nb_unknown:
+                msg = _(b"permanently delete %d unknown files?")
+                msg %= nb_unknown
+            elif nb_ignored:
+                msg = _(b"permanently delete %d ignored files?")
+                msg %= nb_ignored
+            elif removeemptydirs:
+                dir_count = 0
+                for f in directories:
+                    if matcher(f) and not repo.wvfs.listdir(f):
+                        dir_count += 1
+                if dir_count:
+                    msg = _(
+                        b"permanently delete at least %d empty directories?"
+                    )
+                    msg %= dir_count
+                else:
+                    # XXX we might be missing directories there
+                    return res
+            msg += b" (yN)$$ &Yes $$ &No"
+            if repo.ui.promptchoice(msg, default=1) == 1:
+                raise error.CanceledError(_(b'removal cancelled'))
+
         if removefiles:
             for f in sorted(status.unknown + status.ignored):
                 if not noop:
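
A hedged sketch of how a caller might use the new ``confirm`` flag; the
leading (repo, matcher, ...) arguments are assumed from purge()'s existing
signature, which this hunk does not show:

    from mercurial import error
    from mercurial import merge as mergemod

    try:
        # prompts once with the number of unknown/ignored files (or, if
        # only empty directories qualify, their count) before deleting
        mergemod.purge(repo, matcher, unknown=True, ignored=False,
                       confirm=True)
    except error.CanceledError:
        repo.ui.status(b'nothing removed\n')

CanceledError is the exception raised above when the prompt is answered "no".
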
--- a/mercurial/mergestate.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/mergestate.py	Thu Mar 18 18:24:59 2021 -0400
@@ -382,7 +382,6 @@
         if merge_ret is None:
             # If return value of merge is None, then there are no real conflict
             del self._state[dfile]
-            self._stateextras.pop(dfile, None)
             self._dirty = True
         elif not merge_ret:
             self.mark(dfile, MERGE_RECORD_RESOLVED)
--- a/mercurial/mergeutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/mergeutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -13,7 +13,7 @@
 
 
 def checkunresolved(ms):
-    if list(ms.unresolved()):
+    if ms.unresolvedcount():
         raise error.StateError(
             _(b"unresolved merge conflicts (see 'hg help resolve')")
         )
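
Both this file and merge.py above switch from list(ms.unresolved()), which
materializes the whole generator just to test for emptiness, to
ms.unresolvedcount(). A one-line illustration of the intent:

    # before: build a throwaway list of every unresolved file
    has_conflicts = bool(list(ms.unresolved()))
    # after: ask the mergestate for the count directly
    has_conflicts = bool(ms.unresolvedcount())
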
--- a/mercurial/metadata.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/metadata.py	Thu Mar 18 18:24:59 2021 -0400
@@ -18,6 +18,7 @@
 from . import (
     error,
     pycompat,
+    requirements as requirementsmod,
     util,
 )
 
@@ -804,6 +805,21 @@
     return encode_files_sidedata(files), files.has_copies_info
 
 
+def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
+    return _getsidedata(repo, rev)[0]
+
+
+def set_sidedata_spec_for_repo(repo):
+    if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+        repo.register_wanted_sidedata(sidedatamod.SD_FILES)
+        repo.register_sidedata_computer(
+            b"changelog",
+            sidedatamod.SD_FILES,
+            (sidedatamod.SD_FILES,),
+            copies_sidedata_computer,
+        )
+
+
 def getsidedataadder(srcrepo, destrepo):
     use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
     if pycompat.iswindows or not use_w:
@@ -882,14 +898,14 @@
         data = {}, False
         if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
             # Is the data previously shelved?
-            sidedata = staging.pop(rev, None)
-            if sidedata is None:
+            data = staging.pop(rev, None)
+            if data is None:
                 # look at the queued result until we find the one we are looking
                 # for (shelve the other ones)
                 r, data = sidedataq.get()
                 while r != rev:
                     staging[r] = data
-                    r, sidedata = sidedataq.get()
+                    r, data = sidedataq.get()
             tokens.release()
         sidedata, has_copies_info = data
         new_flag = 0
--- a/mercurial/minirst.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/minirst.py	Thu Mar 18 18:24:59 2021 -0400
@@ -158,7 +158,7 @@
 _optionre = re.compile(
     br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)' br'((.*)  +)(.*)$'
 )
-_fieldre = re.compile(br':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
+_fieldre = re.compile(br':(?![: ])((?:\:|[^:])*)(?<! ):[ ]+(.*)')
 _definitionre = re.compile(br'[^ ]')
 _tablere = re.compile(br'(=+\s+)*=+')
 
@@ -229,7 +229,7 @@
             m = _fieldre.match(blocks[j][b'lines'][0])
             key, rest = m.groups()
             blocks[j][b'lines'][0] = rest
-            blocks[j][b'key'] = key
+            blocks[j][b'key'] = key.replace(br'\:', b':')
             j += 1
 
         i = j + 1
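
The widened _fieldre lets a field name carry an escaped colon, and the parser
unescapes it when recording the key. A standalone check of the new behaviour
(in a raw bytes literal, br'\:' is the two-character backslash-colon
sequence):

    import re

    _fieldre = re.compile(br':(?![: ])((?:\:|[^:])*)(?<! ):[ ]+(.*)')
    m = _fieldre.match(br':hg\:log: show revision history')
    key, rest = m.groups()
    assert key.replace(br'\:', b':') == b'hg:log'
    assert rest == b'show revision history'
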
--- a/mercurial/narrowspec.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/narrowspec.py	Thu Mar 18 18:24:59 2021 -0400
@@ -329,7 +329,6 @@
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
-        assert not trackeddirty
         clean.extend(lookup)
     else:
         trackeddirty.extend(lookup)
--- a/mercurial/node.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/node.py	Thu Mar 18 18:24:59 2021 -0400
@@ -21,29 +21,48 @@
         raise TypeError(e)
 
 
-nullrev = -1
-# In hex, this is '0000000000000000000000000000000000000000'
-nullid = b"\0" * 20
-nullhex = hex(nullid)
+def short(node):
+    return hex(node[:6])
+
 
-# Phony node value to stand-in for new files in some uses of
-# manifests.
-# In hex, this is '2121212121212121212121212121212121212121'
-newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
-# In hex, this is '3030303030303030303030303030306164646564'
-addednodeid = b'000000000000000added'
-# In hex, this is '3030303030303030303030306d6f646966696564'
-modifiednodeid = b'000000000000modified'
+nullrev = -1
 
-wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
-
-# pseudo identifiers for working directory
-# (they are experimental, so don't add too many dependencies on them)
+# pseudo identifier for working directory
+# (experimental, so don't add too many dependencies on it)
 wdirrev = 0x7FFFFFFF
-# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
-wdirid = b"\xff" * 20
-wdirhex = hex(wdirid)
 
 
-def short(node):
-    return hex(node[:6])
+class sha1nodeconstants(object):
+    nodelen = 20
+
+    # In hex, this is '0000000000000000000000000000000000000000'
+    nullid = b"\0" * nodelen
+    nullhex = hex(nullid)
+
+    # Phony node value to stand-in for new files in some uses of
+    # manifests.
+    # In hex, this is '2121212121212121212121212121212121212121'
+    newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
+    # In hex, this is '3030303030303030303030303030306164646564'
+    addednodeid = b'000000000000000added'
+    # In hex, this is '3030303030303030303030306d6f646966696564'
+    modifiednodeid = b'000000000000modified'
+
+    wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
+
+    # pseudo identifier for working directory
+    # (experimental, so don't add too many dependencies on it)
+    # In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
+    wdirid = b"\xff" * nodelen
+    wdirhex = hex(wdirid)
+
+
+# legacy starting point for porting modules
+nullid = sha1nodeconstants.nullid
+nullhex = sha1nodeconstants.nullhex
+newnodeid = sha1nodeconstants.newnodeid
+addednodeid = sha1nodeconstants.addednodeid
+modifiednodeid = sha1nodeconstants.modifiednodeid
+wdirfilenodeids = sha1nodeconstants.wdirfilenodeids
+wdirid = sha1nodeconstants.wdirid
+wdirhex = sha1nodeconstants.wdirhex
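
Grouping the SHA-1 values on a class lets a future hash algorithm ship its own
constants object, while the module-level names keep legacy imports working. A
small illustration of the equivalence during the porting period:

    from mercurial.node import nullid, sha1nodeconstants

    assert nullid is sha1nodeconstants.nullid
    assert len(sha1nodeconstants.wdirid) == sha1nodeconstants.nodelen
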
--- a/mercurial/obsolete.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/obsolete.py	Thu Mar 18 18:24:59 2021 -0400
@@ -560,10 +560,11 @@
     # parents: (tuple of nodeid) or None, parents of predecessors
     #          None is used when no data has been recorded
 
-    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
+    def __init__(self, repo, svfs, defaultformat=_fm1version, readonly=False):
         # caches for various obsolescence related cache
         self.caches = {}
         self.svfs = svfs
+        self.repo = repo
         self._defaultformat = defaultformat
         self._readonly = readonly
 
@@ -806,7 +807,7 @@
     if defaultformat is not None:
         kwargs['defaultformat'] = defaultformat
     readonly = not isenabled(repo, createmarkersopt)
-    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
+    store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
     if store and readonly:
         ui.warn(
             _(b'obsolete feature not enabled but %i markers found!\n')
--- a/mercurial/pathutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/pathutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -15,11 +15,21 @@
     util,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Callable,
+        Iterator,
+        Optional,
+    )
+
+
 rustdirs = policy.importrust('dirstate', 'Dirs')
 parsers = policy.importmod('parsers')
 
 
 def _lowerclean(s):
+    # type: (bytes) -> bytes
     return encoding.hfsignoreclean(s.lower())
 
 
@@ -59,6 +69,7 @@
             self.normcase = lambda x: x
 
     def __call__(self, path, mode=None):
+        # type: (bytes, Optional[Any]) -> None
         """Check the relative path.
         path may contain a pattern (e.g. foodir/**.txt)"""
 
@@ -119,6 +130,7 @@
             self.audited.add(normpath)
 
     def _checkfs(self, prefix, path):
+        # type: (bytes, bytes) -> None
         """raise exception if a file system backed check fails"""
         curpath = os.path.join(self.root, prefix)
         try:
@@ -143,6 +155,7 @@
                     raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
 
     def check(self, path):
+        # type: (bytes) -> bool
         try:
             self(path)
             return True
@@ -164,6 +177,7 @@
 
 
 def canonpath(root, cwd, myname, auditor=None):
+    # type: (bytes, bytes, bytes, Optional[pathauditor]) -> bytes
     """return the canonical path of myname, given cwd and root
 
     >>> def check(root, cwd, myname):
@@ -266,6 +280,7 @@
 
 
 def normasprefix(path):
+    # type: (bytes) -> bytes
     """normalize the specified path as path prefix
 
     Returned value can be used safely for "p.startswith(prefix)",
@@ -289,6 +304,7 @@
 
 
 def finddirs(path):
+    # type: (bytes) -> Iterator[bytes]
     pos = path.rfind(b'/')
     while pos != -1:
         yield path[:pos]
@@ -318,6 +334,7 @@
                 addpath(f)
 
     def addpath(self, path):
+        # type: (bytes) -> None
         dirs = self._dirs
         for base in finddirs(path):
             if base.endswith(b'/'):
@@ -330,6 +347,7 @@
             dirs[base] = 1
 
     def delpath(self, path):
+        # type: (bytes) -> None
         dirs = self._dirs
         for base in finddirs(path):
             if dirs[base] > 1:
@@ -341,6 +359,7 @@
         return iter(self._dirs)
 
     def __contains__(self, d):
+        # type: (bytes) -> bool
         return d in self._dirs
 
 
@@ -355,4 +374,4 @@
 # rather not let our internals know that we're thinking in posix terms
 # - instead we'll let them be oblivious.
 join = posixpath.join
-dirname = posixpath.dirname
+dirname = posixpath.dirname  # type: Callable[[bytes], bytes]
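
The annotations added to this file use PEP 484 type comments rather than
inline annotations because the code still has to parse under Python 2. For
example, the comment on finddirs() above is equivalent to what would be
written on Python 3 only as:

    def finddirs(path: bytes) -> Iterator[bytes]:
        ...
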
--- a/mercurial/phases.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/phases.py	Thu Mar 18 18:24:59 2021 -0400
@@ -127,10 +127,32 @@
     util,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Callable,
+        Dict,
+        Iterable,
+        List,
+        Optional,
+        Set,
+        Tuple,
+    )
+    from . import (
+        localrepo,
+        ui as uimod,
+    )
+
+    Phaseroots = Dict[int, Set[bytes]]
+    Phasedefaults = List[
+        Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
+    ]
+
+
 _fphasesentry = struct.Struct(b'>i20s')
 
 # record phase index
-public, draft, secret = range(3)
+public, draft, secret = range(3)  # type: int
 archived = 32  # non-continuous for compatibility
 internal = 96  # non-continuous for compatibility
 allphases = (public, draft, secret, archived, internal)
@@ -154,11 +176,13 @@
 
 
 def supportinternal(repo):
+    # type: (localrepo.localrepository) -> bool
     """True if the internal phase can be used on a repository"""
     return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
 
 
 def _readroots(repo, phasedefaults=None):
+    # type: (localrepo.localrepository, Optional[Phasedefaults]) -> Tuple[Phaseroots, bool]
     """Read phase roots from disk
 
     phasedefaults is a list of fn(repo, roots) callable, which are
@@ -191,6 +215,7 @@
 
 
 def binaryencode(phasemapping):
+    # type: (Dict[int, List[bytes]]) -> bytes
     """encode a 'phase -> nodes' mapping into a binary stream
 
     The revision lists are encoded as (phase, root) pairs.
@@ -203,6 +228,7 @@
 
 
 def binarydecode(stream):
+    # type: (...) -> Dict[int, List[bytes]]
     """decode a binary stream into a 'phase -> nodes' mapping
 
     The (phase, root) pairs are turned back into a dictionary with
@@ -321,6 +347,7 @@
 
 class phasecache(object):
     def __init__(self, repo, phasedefaults, _load=True):
+        # type: (localrepo.localrepository, Optional[Phasedefaults], bool) -> None
         if _load:
             # Cheap trick to allow shallow-copy without copy module
             self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
@@ -330,6 +357,7 @@
             self.opener = repo.svfs
 
     def hasnonpublicphases(self, repo):
+        # type: (localrepo.localrepository) -> bool
         """detect if there are revisions with non-public phase"""
         repo = repo.unfiltered()
         cl = repo.changelog
@@ -343,6 +371,7 @@
         )
 
     def nonpublicphaseroots(self, repo):
+        # type: (localrepo.localrepository) -> Set[bytes]
         """returns the roots of all non-public phases
 
         The roots are not minimized, so if the secret revisions are
@@ -362,6 +391,8 @@
         )
 
     def getrevset(self, repo, phases, subset=None):
+        # type: (localrepo.localrepository, Iterable[int], Optional[Any]) -> Any
+        # TODO: finish typing this
         """return a smartset for the given phases"""
         self.loadphaserevs(repo)  # ensure phase's sets are loaded
         phases = set(phases)
@@ -457,6 +488,7 @@
         self._loadedrevslen = len(cl)
 
     def loadphaserevs(self, repo):
+        # type: (localrepo.localrepository) -> None
         """ensure phase information is loaded in the object"""
         if self._phasesets is None:
             try:
@@ -470,6 +502,7 @@
         self._phasesets = None
 
     def phase(self, repo, rev):
+        # type: (localrepo.localrepository, int) -> int
         # We need a repo argument here to be able to build _phasesets
         # if necessary. The repository instance is not stored in
         # phasecache to avoid reference cycles. The changelog instance
@@ -652,6 +685,7 @@
         return False
 
     def filterunknown(self, repo):
+        # type: (localrepo.localrepository) -> None
         """remove unknown nodes from the phase boundary
 
         Nothing is lost as unknown nodes only hold data for their descendants.
@@ -729,6 +763,7 @@
 
 
 def listphases(repo):
+    # type: (localrepo.localrepository) -> Dict[bytes, bytes]
     """List phases root for serialization over pushkey"""
     # Use ordered dictionary so behavior is deterministic.
     keys = util.sortdict()
@@ -760,6 +795,7 @@
 
 
 def pushphase(repo, nhex, oldphasestr, newphasestr):
+    # type: (localrepo.localrepository, bytes, bytes, bytes) -> bool
     """List phases root for serialization over pushkey"""
     repo = repo.unfiltered()
     with repo.lock():
@@ -909,6 +945,7 @@
 
 
 def newcommitphase(ui):
+    # type: (uimod.ui) -> int
     """helper to get the target phase of new commit
 
     Handle all possible values for the phases.new-commit options.
@@ -924,11 +961,13 @@
 
 
 def hassecret(repo):
+    # type: (localrepo.localrepository) -> bool
     """utility function that check if a repo have any secret changeset."""
     return bool(repo._phasecache.phaseroots[secret])
 
 
 def preparehookargs(node, old, new):
+    # type: (bytes, Optional[int], Optional[int]) -> Dict[bytes, bytes]
     if old is None:
         old = b''
     else:
--- a/mercurial/pure/parsers.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/pure/parsers.py	Thu Mar 18 18:24:59 2021 -0400
@@ -33,13 +33,6 @@
     return x
 
 
-indexformatng = b">Qiiiiii20s12x"
-indexfirst = struct.calcsize(b'Q')
-sizeint = struct.calcsize(b'i')
-indexsize = struct.calcsize(indexformatng)
-nullitem = (0, 0, 0, -1, -1, -1, -1, nullid)
-
-
 def gettype(q):
     return int(q & 0xFFFF)
 
@@ -49,6 +42,17 @@
 
 
 class BaseIndexObject(object):
+    # Format of an index entry according to Python's `struct` language
+    index_format = b">Qiiiiii20s12x"
+    # Size of a C unsigned long long int, platform independent
+    big_int_size = struct.calcsize(b'>Q')
+    # Size of a C long int, platform independent
+    int_size = struct.calcsize(b'>i')
+    # Size of the entire index format
+    index_size = struct.calcsize(index_format)
+    # An empty index entry, used as a default value and as the nullrev entry
+    null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+
     @property
     def nodemap(self):
         msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
@@ -94,7 +98,7 @@
     def append(self, tup):
         if '_nodemap' in vars(self):
             self._nodemap[tup[7]] = len(self)
-        data = _pack(indexformatng, *tup)
+        data = _pack(self.index_format, *tup)
         self._extra.append(data)
 
     def _check_index(self, i):
@@ -105,14 +109,14 @@
 
     def __getitem__(self, i):
         if i == -1:
-            return nullitem
+            return self.null_item
         self._check_index(i)
         if i >= self._lgt:
             data = self._extra[i - self._lgt]
         else:
             index = self._calculate_index(i)
-            data = self._data[index : index + indexsize]
-        r = _unpack(indexformatng, data)
+            data = self._data[index : index + self.index_size]
+        r = _unpack(self.index_format, data)
         if self._lgt and i == 0:
             r = (offset_type(0, gettype(r[0])),) + r[1:]
         return r
@@ -120,13 +124,13 @@
 
 class IndexObject(BaseIndexObject):
     def __init__(self, data):
-        assert len(data) % indexsize == 0
+        assert len(data) % self.index_size == 0
         self._data = data
-        self._lgt = len(data) // indexsize
+        self._lgt = len(data) // self.index_size
         self._extra = []
 
     def _calculate_index(self, i):
-        return i * indexsize
+        return i * self.index_size
 
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
@@ -135,7 +139,7 @@
         self._check_index(i)
         self._stripnodes(i)
         if i < self._lgt:
-            self._data = self._data[: i * indexsize]
+            self._data = self._data[: i * self.index_size]
             self._lgt = i
             self._extra = []
         else:
@@ -198,14 +202,16 @@
         if lgt is not None:
             self._offsets = [0] * lgt
         count = 0
-        while off <= len(self._data) - indexsize:
+        while off <= len(self._data) - self.index_size:
+            start = off + self.big_int_size
             (s,) = struct.unpack(
-                b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
+                b'>i',
+                self._data[start : start + self.int_size],
             )
             if lgt is not None:
                 self._offsets[count] = off
             count += 1
-            off += indexsize + s
+            off += self.index_size + s
         if off != len(self._data):
             raise ValueError(b"corrupted data")
         return count
@@ -227,10 +233,82 @@
         return self._offsets[i]
 
 
-def parse_index2(data, inline):
+def parse_index2(data, inline, revlogv2=False):
     if not inline:
-        return IndexObject(data), None
-    return InlinedIndexObject(data, inline), (0, data)
+        cls = IndexObject2 if revlogv2 else IndexObject
+        return cls(data), None
+    cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
+    return cls(data, inline), (0, data)
+
+
+class Index2Mixin(object):
+    #  6 bytes: offset
+    #  2 bytes: flags
+    #  4 bytes: compressed length
+    #  4 bytes: uncompressed length
+    #  4 bytes: base rev
+    #  4 bytes: link rev
+    #  4 bytes: parent 1 rev
+    #  4 bytes: parent 2 rev
+    # 32 bytes: nodeid
+    #  8 bytes: sidedata offset
+    #  4 bytes: sidedata compressed length
+    #  20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+    index_format = b">Qiiiiii20s12xQi20x"
+    index_size = struct.calcsize(index_format)
+    assert index_size == 96, index_size
+    null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
+
+    def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+        """
+        Replace an existing index entry's sidedata offset and length with new
+        ones.
+        This cannot be used outside of the context of sidedata rewriting,
+        inside the transaction that creates the revision `i`.
+        """
+        if i < 0:
+            raise KeyError
+        self._check_index(i)
+        sidedata_format = b">Qi"
+        packed_size = struct.calcsize(sidedata_format)
+        if i >= self._lgt:
+            packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
+            old = self._extra[i - self._lgt]
+            new = old[:64] + packed + old[64 + packed_size :]
+            self._extra[i - self._lgt] = new
+        else:
+            msg = b"cannot rewrite entries outside of this transaction"
+            raise KeyError(msg)
+
+
+class IndexObject2(Index2Mixin, IndexObject):
+    pass
+
+
+class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
+    def _inline_scan(self, lgt):
+        sidedata_length_pos = 72
+        off = 0
+        if lgt is not None:
+            self._offsets = [0] * lgt
+        count = 0
+        while off <= len(self._data) - self.index_size:
+            start = off + self.big_int_size
+            (data_size,) = struct.unpack(
+                b'>i',
+                self._data[start : start + self.int_size],
+            )
+            start = off + sidedata_length_pos
+            (side_data_size,) = struct.unpack(
+                b'>i', self._data[start : start + self.int_size]
+            )
+            if lgt is not None:
+                self._offsets[count] = off
+            count += 1
+            off += self.index_size + data_size + side_data_size
+        if off != len(self._data):
+            raise ValueError(b"corrupted data")
+        return count
 
 
 def parse_index_devel_nodemap(data, inline):
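
The v2 index format extends the v1 entry with a sidedata offset and length
plus padding, and the assert above pins the total at 96 bytes. A standalone
check of the arithmetic, which also shows why replace_sidedata_info() splices
at byte 64, the end of the v1-shaped prefix:

    import struct

    v1 = struct.calcsize(b">Qiiiiii20s12x")       # 8 + 6*4 + 20 + 12 = 64
    v2 = struct.calcsize(b">Qiiiiii20s12xQi20x")  # 64 + 8 + 4 + 20 = 96
    assert (v1, v2) == (64, 96)
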
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/pythoncapi_compat.h	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,283 @@
+// Header file providing new functions of the Python C API to old Python
+// versions.
+//
+// File distributed under the MIT license.
+//
+// Homepage:
+// https://github.com/pythoncapi/pythoncapi_compat
+//
+// Latest version:
+// https://raw.githubusercontent.com/pythoncapi/pythoncapi_compat/master/pythoncapi_compat.h
+
+#ifndef PYTHONCAPI_COMPAT
+#define PYTHONCAPI_COMPAT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "frameobject.h"          // PyFrameObject, PyFrame_GetBack()
+
+
+/* VC 2008 doesn't know about the inline keyword. */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define inline __forceinline
+#endif
+
+// Cast argument to PyObject* type.
+#ifndef _PyObject_CAST
+#  define _PyObject_CAST(op) ((PyObject*)(op))
+#endif
+
+
+// bpo-42262 added Py_NewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_NewRef)
+static inline PyObject* _Py_NewRef(PyObject *obj)
+{
+    Py_INCREF(obj);
+    return obj;
+}
+#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
+#if PY_VERSION_HEX < 0x030a00A3 && !defined(Py_XNewRef)
+static inline PyObject* _Py_XNewRef(PyObject *obj)
+{
+    Py_XINCREF(obj);
+    return obj;
+}
+#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
+#endif
+
+
+// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
+static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
+{
+    ob->ob_refcnt = refcnt;
+}
+#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT((PyObject*)(ob), refcnt)
+#endif
+
+
+// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+static inline void
+_Py_SET_TYPE(PyObject *ob, PyTypeObject *type)
+{
+    ob->ob_type = type;
+}
+#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
+#endif
+
+
+// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE)
+static inline void
+_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size)
+{
+    ob->ob_size = size;
+}
+#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size)
+#endif
+
+
+// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyCodeObject*
+PyFrame_GetCode(PyFrameObject *frame)
+{
+    PyCodeObject *code;
+    assert(frame != NULL);
+    code = frame->f_code;
+    assert(code != NULL);
+    Py_INCREF(code);
+    return code;
+}
+#endif
+
+static inline PyCodeObject*
+_PyFrame_GetCodeBorrow(PyFrameObject *frame)
+{
+    PyCodeObject *code = PyFrame_GetCode(frame);
+    Py_DECREF(code);
+    return code;  // borrowed reference
+}
+
+
+// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyFrame_GetBack(PyFrameObject *frame)
+{
+    PyFrameObject *back;
+    assert(frame != NULL);
+    back = frame->f_back;
+    Py_XINCREF(back);
+    return back;
+}
+#endif
+
+static inline PyFrameObject*
+_PyFrame_GetBackBorrow(PyFrameObject *frame)
+{
+    PyFrameObject *back = PyFrame_GetBack(frame);
+    Py_XDECREF(back);
+    return back;  // borrowed reference
+}
+
+
+// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyThreadState_GetInterpreter(PyThreadState *tstate)
+{
+    assert(tstate != NULL);
+    return tstate->interp;
+}
+#endif
+
+
+// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1
+#if PY_VERSION_HEX < 0x030900B1
+static inline PyFrameObject*
+PyThreadState_GetFrame(PyThreadState *tstate)
+{
+    PyFrameObject *frame;
+    assert(tstate != NULL);
+    frame = tstate->frame;
+    Py_XINCREF(frame);
+    return frame;
+}
+#endif
+
+static inline PyFrameObject*
+_PyThreadState_GetFrameBorrow(PyThreadState *tstate)
+{
+    PyFrameObject *frame = PyThreadState_GetFrame(tstate);
+    Py_XDECREF(frame);
+    return frame;  // borrowed reference
+}
+
+
+// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline PyInterpreterState *
+PyInterpreterState_Get(void)
+{
+    PyThreadState *tstate;
+    PyInterpreterState *interp;
+
+    tstate = PyThreadState_GET();
+    if (tstate == NULL) {
+        Py_FatalError("GIL released (tstate is NULL)");
+    }
+    interp = tstate->interp;
+    if (interp == NULL) {
+        Py_FatalError("no current interpreter");
+    }
+    return interp;
+}
+#endif
+
+
+// bpo-39947 added PyThreadState_GetID() to Python 3.9.0a6
+#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6
+static inline uint64_t
+PyThreadState_GetID(PyThreadState *tstate)
+{
+    assert(tstate != NULL);
+    return tstate->id;
+}
+#endif
+
+
+// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1
+#if PY_VERSION_HEX < 0x030900A1
+static inline PyObject*
+PyObject_CallNoArgs(PyObject *func)
+{
+    return PyObject_CallFunctionObjArgs(func, NULL);
+}
+#endif
+
+
+// bpo-39245 made PyObject_CallOneArg() public (previously called
+// _PyObject_CallOneArg) in Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4
+static inline PyObject*
+PyObject_CallOneArg(PyObject *func, PyObject *arg)
+{
+    return PyObject_CallFunctionObjArgs(func, arg, NULL);
+}
+#endif
+
+
+// bpo-40024 added PyModule_AddType() to Python 3.9.0a5
+#if PY_VERSION_HEX < 0x030900A5
+static inline int
+PyModule_AddType(PyObject *module, PyTypeObject *type)
+{
+    const char *name, *dot;
+
+    if (PyType_Ready(type) < 0) {
+        return -1;
+    }
+
+    // inline _PyType_Name()
+    name = type->tp_name;
+    assert(name != NULL);
+    dot = strrchr(name, '.');
+    if (dot != NULL) {
+        name = dot + 1;
+    }
+
+    Py_INCREF(type);
+    if (PyModule_AddObject(module, name, (PyObject *)type) < 0) {
+        Py_DECREF(type);
+        return -1;
+    }
+
+    return 0;
+}
+#endif
+
+
+// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6.
+// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2.
+#if PY_VERSION_HEX < 0x030900A6
+static inline int
+PyObject_GC_IsTracked(PyObject* obj)
+{
+    return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj));
+}
+#endif
+
+// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6.
+// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final.
+#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0
+static inline int
+PyObject_GC_IsFinalized(PyObject *obj)
+{
+    return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED((PyGC_Head *)(obj)-1));
+}
+#endif
+
+
+// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4
+#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE)
+static inline int
+_Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) {
+    return ob->ob_type == type;
+}
+#define Py_IS_TYPE(ob, type) _Py_IS_TYPE((const PyObject*)(ob), type)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // PYTHONCAPI_COMPAT
--- a/mercurial/repair.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/repair.py	Thu Mar 18 18:24:59 2021 -0400
@@ -308,11 +308,12 @@
     if not tostrip:
         return None
 
-    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
+    backupfile = None
     if backup:
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)
 
+    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
     with repo.transaction(b'strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
@@ -442,7 +443,7 @@
     """
     repo = repo.unfiltered()
 
-    if b'fncache' not in repo.requirements:
+    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
         ui.warn(
             _(
                 b'(not rebuilding fncache because repository does not '
--- a/mercurial/requirements.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/requirements.py	Thu Mar 18 18:24:59 2021 -0400
@@ -7,6 +7,11 @@
 
 from __future__ import absolute_import
 
+GENERALDELTA_REQUIREMENT = b'generaldelta'
+DOTENCODE_REQUIREMENT = b'dotencode'
+STORE_REQUIREMENT = b'store'
+FNCACHE_REQUIREMENT = b'fncache'
+
 # When narrowing is finalized and no longer subject to format changes,
 # we should move this to just "narrow" or similar.
 NARROW_REQUIREMENT = b'narrowhg-experimental'
@@ -21,9 +26,11 @@
 # Stores manifest in Tree structure
 TREEMANIFEST_REQUIREMENT = b'treemanifest'
 
+REVLOGV1_REQUIREMENT = b'revlogv1'
+
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
-REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
+REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
--- a/mercurial/revlog.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revlog.py	Thu Mar 18 18:24:59 2021 -0400
@@ -28,6 +28,7 @@
     nullhex,
     nullid,
     nullrev,
+    sha1nodeconstants,
     short,
     wdirfilenodeids,
     wdirhex,
@@ -83,6 +84,7 @@
     storageutil,
     stringutil,
 )
+from .pure import parsers as pureparsers
 
 # blanked usage of all the name to prevent pyflakes constraints
 # We need these name available in the module for extensions.
@@ -119,10 +121,10 @@
 
 # Flag processors for REVIDX_ELLIPSIS.
 def ellipsisreadprocessor(rl, text):
-    return text, False, {}
-
-
-def ellipsiswriteprocessor(rl, text, sidedata):
+    return text, False
+
+
+def ellipsiswriteprocessor(rl, text):
     return text, False
 
 
@@ -203,6 +205,7 @@
     baserevisionsize = attr.ib()
     revision = attr.ib()
     delta = attr.ib()
+    sidedata = attr.ib()
     linknode = attr.ib(default=None)
 
 
@@ -364,6 +367,25 @@
         return p
 
 
+indexformatv2 = struct.Struct(pureparsers.Index2Mixin.index_format)
+indexformatv2_pack = indexformatv2.pack
+
+
+class revlogv2io(object):
+    def __init__(self):
+        self.size = indexformatv2.size
+
+    def parseindex(self, data, inline):
+        index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+        return index, cache
+
+    def packentry(self, entry, node, version, rev):
+        p = indexformatv2_pack(*entry)
+        if rev == 0:
+            p = versionformat_pack(version) + p[4:]
+        return p
+
+
 NodemapRevlogIO = None
 
 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
@@ -421,6 +443,11 @@
 
     If `upperboundcomp` is not None, this is the expected maximal gain from
     compression for the data content.
+
+    `concurrencychecker` is an optional function that receives 3 arguments: a
+    file handle, a filename, and an expected position. It should check whether
+    the current position in the file handle is valid, and log/warn/fail (by
+    raising).
     """
 
     _flagserrorclass = error.RevlogError
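
The docstring above fixes only the checker's argument list, not its
behaviour. A hedged sketch of one possible checker that treats any unexpected
end-of-file position as a concurrent modification; the exception choice is
illustrative:

    import os

    from mercurial import error

    def concurrencychecker(fh, filename, expected):
        # `expected` is where the revlog believes the next byte will be
        # written; anything else means another writer got there first
        fh.seek(0, os.SEEK_END)
        if fh.tell() != expected:
            raise error.Abort(
                b'%s: unexpected length, concurrent write?' % filename
            )
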
@@ -435,6 +462,7 @@
         censorable=False,
         upperboundcomp=None,
         persistentnodemap=False,
+        concurrencychecker=None,
     ):
         """
         create a revlog object
@@ -448,14 +476,9 @@
         self.datafile = datafile or (indexfile[:-2] + b".d")
         self.nodemap_file = None
         if persistentnodemap:
-            if indexfile.endswith(b'.a'):
-                pending_path = indexfile[:-4] + b".n.a"
-                if opener.exists(pending_path):
-                    self.nodemap_file = pending_path
-                else:
-                    self.nodemap_file = indexfile[:-4] + b".n"
-            else:
-                self.nodemap_file = indexfile[:-2] + b".n"
+            self.nodemap_file = nodemaputil.get_nodemap_file(
+                opener, self.indexfile
+            )
 
         self.opener = opener
         #  When True, indexfile is opened with checkambig=True at writing, to
@@ -495,6 +518,8 @@
 
         self._loadindex()
 
+        self._concurrencychecker = concurrencychecker
+
     def _loadindex(self):
         mmapindexthreshold = None
         opts = self.opener.options
@@ -531,8 +556,6 @@
         if self._mmaplargeindex and b'mmapindexthreshold' in opts:
             mmapindexthreshold = opts[b'mmapindexthreshold']
         self.hassidedata = bool(opts.get(b'side-data', False))
-        if self.hassidedata:
-            self._flagprocessors[REVIDX_SIDEDATA] = sidedatautil.processors
         self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
         withsparseread = bool(opts.get(b'with-sparse-read', False))
         # sparse-revlog forces sparse-read
@@ -617,7 +640,11 @@
                     % (flags >> 16, fmt, self.indexfile)
                 )
 
-            self._inline = versionflags & FLAG_INLINE_DATA
+            # There is a bug in the transaction handling when going from an
+            # inline revlog to a separate index and data file. Turn it off until
+            # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
+            # See issue6485
+            self._inline = False
             # generaldelta implied by version 2 revlogs.
             self._generaldelta = True
 
@@ -625,6 +652,10 @@
             raise error.RevlogError(
                 _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
             )
+
+        self.nodeconstants = sha1nodeconstants
+        self.nullid = self.nodeconstants.nullid
+
         # sparse-revlog can't be on without general-delta (issue6056)
         if not self._generaldelta:
             self._sparserevlog = False
@@ -647,6 +678,8 @@
         self._io = revlogio()
         if self.version == REVLOGV0:
             self._io = revlogoldio()
+        elif fmt == REVLOGV2:
+            self._io = revlogv2io()
         elif devel_nodemap:
             self._io = NodemapRevlogIO()
         elif use_rust_index:
@@ -831,6 +864,11 @@
     def length(self, rev):
         return self.index[rev][1]
 
+    def sidedata_length(self, rev):
+        if self.version & 0xFFFF != REVLOGV2:
+            return 0
+        return self.index[rev][9]
+
     def rawsize(self, rev):
         """return the length of the uncompressed text for a given revision"""
         l = self.index[rev][2]
@@ -875,8 +913,10 @@
             if rev == wdirrev:
                 raise error.WdirUnsupported
             raise
-
-        return entry[5], entry[6]
+        if entry[5] == nullrev:
+            return entry[6], entry[5]
+        else:
+            return entry[5], entry[6]
 
     # fast parentrevs(rev) where rev isn't filtered
     _uncheckedparentrevs = parentrevs
@@ -897,7 +937,11 @@
     def parents(self, node):
         i = self.index
         d = i[self.rev(node)]
-        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline
+        # inline node() to avoid function call overhead
+        if d[5] == nullrev:
+            return i[d[6]][7], i[d[5]][7]
+        else:
+            return i[d[5]][7], i[d[6]][7]
 
     def chainlen(self, rev):
         return self._chaininfo(rev)[0]
@@ -1828,7 +1872,7 @@
         elif operation == b'read':
             return flagutil.processflagsread(self, text, flags)
         else:  # write operation
-            return flagutil.processflagswrite(self, text, flags, None)
+            return flagutil.processflagswrite(self, text, flags)
 
     def revision(self, nodeorrev, _df=None, raw=False):
         """return an uncompressed revision of a given node or revision
@@ -1873,10 +1917,17 @@
         # revision or might need to be processed to retrieve the revision.
         rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
 
+        if self.version & 0xFFFF == REVLOGV2:
+            if rev is None:
+                rev = self.rev(node)
+            sidedata = self._sidedata(rev)
+        else:
+            sidedata = {}
+
         if raw and validated:
             # if we don't want to process the raw text and that raw
             # text is cached, we can exit early.
-            return rawtext, {}
+            return rawtext, sidedata
         if rev is None:
             rev = self.rev(node)
         # the revlog's flag for this revision
@@ -1885,20 +1936,14 @@
 
         if validated and flags == REVIDX_DEFAULT_FLAGS:
             # no extra flags set, no flag processor runs, text = rawtext
-            return rawtext, {}
-
-        sidedata = {}
+            return rawtext, sidedata
+
         if raw:
             validatehash = flagutil.processflagsraw(self, rawtext, flags)
             text = rawtext
         else:
-            try:
-                r = flagutil.processflagsread(self, rawtext, flags)
-            except error.SidedataHashError as exc:
-                msg = _(b"integrity check failed on %s:%s sidedata key %d")
-                msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey)
-                raise error.RevlogError(msg)
-            text, validatehash, sidedata = r
+            r = flagutil.processflagsread(self, rawtext, flags)
+            text, validatehash = r
         if validatehash:
             self.checkhash(text, node, rev=rev)
         if not validated:
@@ -1949,6 +1994,21 @@
         del basetext  # let us have a chance to free memory early
         return (rev, rawtext, False)
 
+    def _sidedata(self, rev):
+        """Return the sidedata for a given revision number."""
+        index_entry = self.index[rev]
+        sidedata_offset = index_entry[8]
+        sidedata_size = index_entry[9]
+
+        if self._inline:
+            sidedata_offset += self._io.size * (1 + rev)
+        if sidedata_size == 0:
+            return {}
+
+        segment = self._getsegment(sidedata_offset, sidedata_size)
+        sidedata = sidedatautil.deserialize_sidedata(segment)
+        return sidedata
+
     def rawdata(self, nodeorrev, _df=None):
         """return an uncompressed raw data of a given node or revision number.
 
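
With this change revision() has a uniform return shape: a (text, sidedata)
pair where sidedata is always a dict. A minimal usage sketch, assuming rl is a
revlog instance:

    text, sidedata = rl.revision(node)
    # sidedata is {} for v1 revlogs and for v2 revisions without sidedata;
    # otherwise it is the mapping deserialized by _sidedata() above
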
@@ -2082,20 +2142,15 @@
 
         if sidedata is None:
             sidedata = {}
-            flags = flags & ~REVIDX_SIDEDATA
         elif not self.hassidedata:
             raise error.ProgrammingError(
                 _(b"trying to add sidedata to a revlog who don't support them")
             )
-        else:
-            flags |= REVIDX_SIDEDATA
 
         if flags:
             node = node or self.hash(text, p1, p2)
 
-        rawtext, validatehash = flagutil.processflagswrite(
-            self, text, flags, sidedata=sidedata
-        )
+        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)
 
         # If the flag processor modifies the revision data, ignore any provided
         # cachedelta.
@@ -2111,8 +2166,9 @@
             )
 
         node = node or self.hash(rawtext, p1, p2)
-        if self.index.has_node(node):
-            return node
+        rev = self.index.get_rev(node)
+        if rev is not None:
+            return rev
 
         if validatehash:
             self.checkhash(rawtext, node, p1=p1, p2=p2)
@@ -2127,6 +2183,7 @@
             flags,
             cachedelta=cachedelta,
             deltacomputer=deltacomputer,
+            sidedata=sidedata,
         )
 
     def addrawrevision(
@@ -2140,6 +2197,7 @@
         flags,
         cachedelta=None,
         deltacomputer=None,
+        sidedata=None,
     ):
         """add a raw revision with known flags, node and parents
         useful when reusing a revision not stored in this revlog (ex: received
@@ -2162,6 +2220,7 @@
                 ifh,
                 dfh,
                 deltacomputer=deltacomputer,
+                sidedata=sidedata,
             )
         finally:
             if dfh:
@@ -2255,6 +2314,7 @@
         dfh,
         alwayscache=False,
         deltacomputer=None,
+        sidedata=None,
     ):
         """internal function to add revisions to the log
 
@@ -2287,7 +2347,23 @@
 
         curr = len(self)
         prev = curr - 1
-        offset = self.end(prev)
+
+        offset = self._get_data_offset(prev)
+
+        if self._concurrencychecker:
+            if self._inline:
+                # offset is "as if" it were in the .d file, so we need to add on
+                # the size of the entry metadata.
+                self._concurrencychecker(
+                    ifh, self.indexfile, offset + curr * self._io.size
+                )
+            else:
+                # Entries in the .i are a consistent size.
+                self._concurrencychecker(
+                    ifh, self.indexfile, curr * self._io.size
+                )
+                self._concurrencychecker(dfh, self.datafile, offset)
+
         p1r, p2r = self.rev(p1), self.rev(p2)
 
         # full versions are inserted when the needed deltas
@@ -2309,6 +2385,16 @@
 
         deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
 
+        if sidedata:
+            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
+            sidedata_offset = offset + deltainfo.deltalen
+        else:
+            serialized_sidedata = b""
+            # Don't store the offset if the sidedata is empty; that way
+            # empty sidedata is easy to detect and is no different from
+            # sidedata we add manually.
+            sidedata_offset = 0
+
         e = (
             offset_type(offset, flags),
             deltainfo.deltalen,
@@ -2318,12 +2404,24 @@
             p1r,
             p2r,
             node,
+            sidedata_offset,
+            len(serialized_sidedata),
         )
+
+        if self.version & 0xFFFF != REVLOGV2:
+            e = e[:8]
+
         self.index.append(e)
-
         entry = self._io.packentry(e, self.node, self.version, curr)
         self._writeentry(
-            transaction, ifh, dfh, entry, deltainfo.data, link, offset
+            transaction,
+            ifh,
+            dfh,
+            entry,
+            deltainfo.data,
+            link,
+            offset,
+            serialized_sidedata,
         )
 
         rawtext = btext[0]
@@ -2334,9 +2432,31 @@
         if type(rawtext) == bytes:  # only accept immutable objects
             self._revisioncache = (node, curr, rawtext)
         self._chainbasecache[curr] = deltainfo.chainbase
-        return node
-
-    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
+        return curr
+
+    def _get_data_offset(self, prev):
+        """Returns the current offset in the (in-transaction) data file.
+        Versions < 2 of the revlog can get this in O(1); revlog v2 needs a
+        docket file to store that information: since sidedata can be rewritten
+        to the end of the data file within a transaction, you can have cases
+        where, for example, rev `n` does not have sidedata while rev `n - 1`
+        does, leading to `n - 1`'s sidedata being written after `n`'s data.
+
+        TODO cache this in a docket file before getting out of experimental."""
+        if self.version & 0xFFFF != REVLOGV2:
+            return self.end(prev)
+
+        offset = 0
+        for rev, entry in enumerate(self.index):
+            sidedata_end = entry[8] + entry[9]
+            # Sidedata for a previous rev has potentially been written after
+            # this rev's end, so take the max.
+            offset = max(self.end(rev), offset, sidedata_end)
+        return offset
+
+    def _writeentry(
+        self, transaction, ifh, dfh, entry, data, link, offset, sidedata
+    ):
         # Files opened in a+ mode have inconsistent behavior on various
         # platforms. Windows requires that a file positioning call be made
         # when the file handle transitions between reads and writes. See
@@ -2360,6 +2480,8 @@
             if data[0]:
                 dfh.write(data[0])
             dfh.write(data[1])
+            if sidedata:
+                dfh.write(sidedata)
             ifh.write(entry)
         else:
             offset += curr * self._io.size
@@ -2367,6 +2489,8 @@
             ifh.write(entry)
             ifh.write(data[0])
             ifh.write(data[1])
+            if sidedata:
+                ifh.write(sidedata)
             self._enforceinlinesize(transaction, ifh)
         nodemaputil.setup_persistent_nodemap(transaction, self)
 
@@ -2375,6 +2499,7 @@
         deltas,
         linkmapper,
         transaction,
+        alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
     ):
@@ -2418,15 +2543,16 @@
             deltacomputer = deltautil.deltacomputer(self)
             # loop through our set of deltas
             for data in deltas:
-                node, p1, p2, linknode, deltabase, delta, flags = data
+                node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
                 link = linkmapper(linknode)
                 flags = flags or REVIDX_DEFAULT_FLAGS
 
-                if self.index.has_node(node):
+                rev = self.index.get_rev(node)
+                if rev is not None:
                     # this can happen if two branches make the same change
-                    self._nodeduplicatecallback(transaction, node)
+                    self._nodeduplicatecallback(transaction, rev)
                     if duplicaterevisioncb:
-                        duplicaterevisioncb(self, node)
+                        duplicaterevisioncb(self, rev)
                     empty = False
                     continue
 
@@ -2464,7 +2590,7 @@
                 # We're only using addgroup() in the context of changegroup
                 # generation so the revision data can always be handled as raw
                 # by the flagprocessor.
-                self._addrevision(
+                rev = self._addrevision(
                     node,
                     None,
                     transaction,
@@ -2475,12 +2601,13 @@
                     (baserev, delta),
                     ifh,
                     dfh,
-                    alwayscache=bool(addrevisioncb),
+                    alwayscache=alwayscache,
                     deltacomputer=deltacomputer,
+                    sidedata=sidedata,
                 )
 
                 if addrevisioncb:
-                    addrevisioncb(self, node)
+                    addrevisioncb(self, rev)
                 empty = False
 
                 if not dfh and not self._inline:
@@ -2621,6 +2748,7 @@
         revisiondata=False,
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
+        sidedata_helpers=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
@@ -2649,6 +2777,7 @@
             deltamode=deltamode,
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
+            sidedata_helpers=sidedata_helpers,
         )
 
     DELTAREUSEALWAYS = b'always'
@@ -3087,3 +3216,54 @@
             )
 
         return d
+
+    def rewrite_sidedata(self, helpers, startrev, endrev):
+        if self.version & 0xFFFF != REVLOGV2:
+            return
+        # inline revlogs are not yet supported because they suffer from an
+        # issue when rewriting them (since it's not an append-only operation).
+        # See issue6485.
+        assert not self._inline
+        if not helpers[1] and not helpers[2]:
+            # Nothing to generate or remove
+            return
+
+        new_entries = []
+        # append the new sidedata
+        with self._datafp(b'a+') as fp:
+            # The a+ mode positioning bug described in revlog._writeentry may
+            # still exist, so seek to the end explicitly
+            fp.seek(0, os.SEEK_END)
+            current_offset = fp.tell()
+            for rev in range(startrev, endrev + 1):
+                entry = self.index[rev]
+                new_sidedata = storageutil.run_sidedata_helpers(
+                    store=self,
+                    sidedata_helpers=helpers,
+                    sidedata={},
+                    rev=rev,
+                )
+
+                serialized_sidedata = sidedatautil.serialize_sidedata(
+                    new_sidedata
+                )
+                if entry[8] != 0 or entry[9] != 0:
+                    # rewriting entries that already have sidedata is not
+                    # supported yet, because it introduces garbage data in the
+                    # revlog.
+                    msg = "Rewriting existing sidedata is not supported yet"
+                    raise error.Abort(msg)
+                entry = entry[:8]
+                entry += (current_offset, len(serialized_sidedata))
+
+                fp.write(serialized_sidedata)
+                new_entries.append(entry)
+                current_offset += len(serialized_sidedata)
+
+        # rewrite the new index entries
+        with self._indexfp(b'w+') as fp:
+            fp.seek(startrev * self._io.size)
+            for i, entry in enumerate(new_entries):
+                rev = startrev + i
+                self.index.replace_sidedata_info(rev, entry[8], entry[9])
+                packed = self._io.packentry(entry, self.node, self.version, rev)
+                fp.write(packed)
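
The offset computation in `_get_data_offset` above can be exercised in
isolation; a minimal sketch with toy index entries (entry[8]/entry[9] are the
sidedata offset and length, `ends` stands in for `revlog.end(rev)`, and the
REVLOGV2 fast path is elided)::

    def get_data_offset(entries, ends):
        offset = 0
        for rev, entry in enumerate(entries):
            sidedata_end = entry[8] + entry[9]
            # sidedata for an older rev may have been appended after this
            # rev's data, so the next writable offset is the max of all ends
            offset = max(ends[rev], offset, sidedata_end)
        return offset

    # rev 0 carries 10 bytes of sidedata rewritten at offset 120, past the
    # end of rev 1's data (100): the next write must start at 130, not 100.
    entries = [(0,) * 8 + (120, 10), (0,) * 8 + (0, 0)]
    assert get_data_offset(entries, ends=[60, 100]) == 130
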
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/concurrency_checker.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,38 @@
+from ..i18n import _
+from .. import error
+
+
+def get_checker(ui, revlog_name=b'changelog'):
+    """Get a function that checks file handle position is as expected.
+
+    This is used to ensure that files haven't been modified outside of our
+    knowledge (such as on a networked filesystem, if `hg debuglocks` was used,
+    or if something wrote to .hg while ignoring the locks).
+
+    Due to revlogs supporting a concept of buffered, delayed, or diverted
+    writes, we're allowing the files to be shorter than expected (the data may
+    not have been written yet), but they can't be longer.
+
+    Please note that this check is not perfect; it can't detect all cases (there
+    may be false-negatives/false-OKs), but it should never claim there's an
+    issue when there isn't (false-positives/false-failures).
+    """
+
+    vpos = ui.config(b'debug', b'revlog.verifyposition.' + revlog_name)
+    # Avoid any `fh.tell` cost if this isn't enabled.
+    if not vpos or vpos not in [b'log', b'warn', b'fail']:
+        return None
+
+    def _checker(fh, fn, expected):
+        if fh.tell() <= expected:
+            return
+
+        msg = _(b'%s: file cursor at position %d, expected %d')
+        # Always log if we're going to warn or fail.
+        ui.log(b'debug', msg + b'\n', fn, fh.tell(), expected)
+        if vpos == b'warn':
+            ui.warn((msg + b'\n') % (fn, fh.tell(), expected))
+        elif vpos == b'fail':
+            raise error.RevlogError(msg % (fn, fh.tell(), expected))
+
+    return _checker
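
The checker's semantics (shorter than expected is tolerated, longer is
flagged) can be shown without a ui object; a self-contained sketch where the
log/warn/fail handling behind `debug.revlog.verifyposition.*` is reduced to a
callback::

    import io

    def make_checker(on_mismatch):
        def checker(fh, fn, expected):
            # shorter than expected is fine (delayed or diverted writes);
            # longer means something else wrote behind our back
            if fh.tell() > expected:
                on_mismatch(fn, fh.tell(), expected)
        return checker

    problems = []
    check = make_checker(lambda *args: problems.append(args))
    fh = io.BytesIO(b'0123456789')
    fh.seek(4)
    check(fh, 'test.i', 8)   # cursor at 4 <= 8: accepted
    check(fh, 'test.i', 2)   # cursor at 4 > 2: flagged
    assert problems == [('test.i', 4, 2)]
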
--- a/mercurial/revlogutils/constants.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revlogutils/constants.py	Thu Mar 18 18:24:59 2021 -0400
@@ -15,7 +15,6 @@
 REVLOGV0 = 0
 REVLOGV1 = 1
 # Dummy value until file format is finalized.
-# Reminder: change the bounds check in revlog.__init__ when this is changed.
 REVLOGV2 = 0xDEAD
 # Shared across v1 and v2.
 FLAG_INLINE_DATA = 1 << 16
--- a/mercurial/revlogutils/flagutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revlogutils/flagutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -84,7 +84,7 @@
     flagprocessors[flag] = processor
 
 
-def processflagswrite(revlog, text, flags, sidedata):
+def processflagswrite(revlog, text, flags):
     """Inspect revision data flags and applies write transformations defined
     by registered flag processors.
 
@@ -100,9 +100,12 @@
     processed text and ``validatehash`` is a bool indicating whether the
     returned text should be checked for hash integrity.
     """
-    return _processflagsfunc(revlog, text, flags, b'write', sidedata=sidedata)[
-        :2
-    ]
+    return _processflagsfunc(
+        revlog,
+        text,
+        flags,
+        b'write',
+    )[:2]
 
 
 def processflagsread(revlog, text, flags):
@@ -145,14 +148,14 @@
     return _processflagsfunc(revlog, text, flags, b'raw')[1]
 
 
-def _processflagsfunc(revlog, text, flags, operation, sidedata=None):
+def _processflagsfunc(revlog, text, flags, operation):
     """internal function to process flag on a revlog
 
     This function is private to this module; code should never need to call it
     directly."""
     # fast path: no flag processors will run
     if flags == 0:
-        return text, True, {}
+        return text, True
     if operation not in (b'read', b'write', b'raw'):
         raise error.ProgrammingError(_(b"invalid '%s' operation") % operation)
     # Check all flags are known.
@@ -168,7 +171,6 @@
     if operation == b'write':
         orderedflags = reversed(orderedflags)
 
-    outsidedata = {}
     for flag in orderedflags:
         # If a flagprocessor has been registered for a known flag, apply the
         # related operation transform and update result tuple.
@@ -186,10 +188,9 @@
                 if operation == b'raw':
                     vhash = rawtransform(revlog, text)
                 elif operation == b'read':
-                    text, vhash, s = readtransform(revlog, text)
-                    outsidedata.update(s)
+                    text, vhash = readtransform(revlog, text)
                 else:  # write operation
-                    text, vhash = writetransform(revlog, text, sidedata)
+                    text, vhash = writetransform(revlog, text)
             validatehash = validatehash and vhash
 
-    return text, validatehash, outsidedata
+    return text, validatehash
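
With sidedata removed from this path, a flag processor is again a plain
(read, write, raw) triple of transforms over the revision text. A hedged
sketch of the shapes expected after this change (the names are illustrative;
registration itself goes through `addflagprocessor` as before)::

    # read and write return (newtext, validatehash); raw returns only the
    # validatehash bool since it must not modify the raw text.
    def read(revlog, text):
        return text.replace(b'\r\n', b'\n'), True

    def write(revlog, text):  # note: no `sidedata` argument anymore
        return text.replace(b'\n', b'\r\n'), True

    def raw(revlog, text):
        return False  # a transformed rawtext defeats hash validation

    processor = (read, write, raw)
    text, validatehash = write(None, b'a\nb\n')
    assert text == b'a\r\nb\r\n' and validatehash
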
--- a/mercurial/revlogutils/nodemap.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revlogutils/nodemap.py	Thu Mar 18 18:24:59 2021 -0400
@@ -81,9 +81,9 @@
     if tr.hasfinalize(callback_id):
         return  # no need to register again
     tr.addpending(
-        callback_id, lambda tr: _persist_nodemap(tr, revlog, pending=True)
+        callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
     )
-    tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))
+    tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
 
 
 class _NoTransaction(object):
@@ -123,20 +123,33 @@
         return  # we do not use persistent_nodemap on this revlog
 
     notr = _NoTransaction()
-    _persist_nodemap(notr, revlog)
+    persist_nodemap(notr, revlog)
     for k in sorted(notr._postclose):
         notr._postclose[k](None)
 
 
-def _persist_nodemap(tr, revlog, pending=False):
+def delete_nodemap(tr, repo, revlog):
+    """ Delete nodemap data on disk for a given revlog"""
+    if revlog.nodemap_file is None:
+        msg = "calling persist nodemap on a revlog without the feature enabled"
+        raise error.ProgrammingError(msg)
+    repo.svfs.unlink(revlog.nodemap_file)
+
+
+def persist_nodemap(tr, revlog, pending=False, force=False):
     """Write nodemap data on disk for a given revlog"""
     if getattr(revlog, 'filteredrevs', ()):
         raise error.ProgrammingError(
             "cannot persist nodemap of a filtered changelog"
         )
     if revlog.nodemap_file is None:
-        msg = "calling persist nodemap on a revlog without the feature enableb"
-        raise error.ProgrammingError(msg)
+        if force:
+            revlog.nodemap_file = get_nodemap_file(
+                revlog.opener, revlog.indexfile
+            )
+        else:
+            msg = "calling persist nodemap on a revlog without the feature enabled"
+            raise error.ProgrammingError(msg)
 
     can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
     ondisk_docket = revlog._nodemap_docket
@@ -634,3 +647,14 @@
     if isinstance(entry, dict):
         return _find_node(entry, node[1:])
     return entry
+
+
+def get_nodemap_file(opener, indexfile):
+    if indexfile.endswith(b'.a'):
+        pending_path = indexfile[:-4] + b".n.a"
+        if opener.exists(pending_path):
+            return pending_path
+        else:
+            return indexfile[:-4] + b".n"
+    else:
+        return indexfile[:-2] + b".n"
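
The naming rule in `get_nodemap_file` maps an index file to its nodemap
companion, preferring a pending `.n.a` file when one exists. A standalone
sketch with `opener.exists` replaced by a plain set (the file names are
hypothetical)::

    def nodemap_file(indexfile, existing):
        if indexfile.endswith(b'.a'):  # pending index, e.g. 00changelog.i.a
            pending = indexfile[:-4] + b'.n.a'
            return pending if pending in existing else indexfile[:-4] + b'.n'
        return indexfile[:-2] + b'.n'

    assert nodemap_file(b'00changelog.i', set()) == b'00changelog.n'
    assert nodemap_file(b'00changelog.i.a', set()) == b'00changelog.n'
    assert (nodemap_file(b'00changelog.i.a', {b'00changelog.n.a'})
            == b'00changelog.n.a')
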
--- a/mercurial/revlogutils/sidedata.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revlogutils/sidedata.py	Thu Mar 18 18:24:59 2021 -0400
@@ -13,9 +13,8 @@
 The current implementation is experimental and subject to changes. Do not rely
 on it in production.
 
-Sidedata are stored in the revlog itself, within the revision rawtext. They
-are inserted and removed from it using the flagprocessors mechanism. The following
-format is currently used::
+Sidedata are stored in the revlog itself, thanks to a new version of the
+revlog. The following format is currently used::
 
     initial header:
         <number of sidedata; 2 bytes>
@@ -60,48 +59,35 @@
 SIDEDATA_ENTRY = struct.Struct('>HL20s')
 
 
-def sidedatawriteprocessor(rl, text, sidedata):
+def serialize_sidedata(sidedata):
     sidedata = list(sidedata.items())
     sidedata.sort()
-    rawtext = [SIDEDATA_HEADER.pack(len(sidedata))]
+    buf = [SIDEDATA_HEADER.pack(len(sidedata))]
     for key, value in sidedata:
         digest = hashutil.sha1(value).digest()
-        rawtext.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
+        buf.append(SIDEDATA_ENTRY.pack(key, len(value), digest))
     for key, value in sidedata:
-        rawtext.append(value)
-    rawtext.append(bytes(text))
-    return b''.join(rawtext), False
+        buf.append(value)
+    buf = b''.join(buf)
+    return buf
 
 
-def sidedatareadprocessor(rl, text):
+def deserialize_sidedata(blob):
     sidedata = {}
     offset = 0
-    (nbentry,) = SIDEDATA_HEADER.unpack(text[: SIDEDATA_HEADER.size])
+    (nbentry,) = SIDEDATA_HEADER.unpack(blob[: SIDEDATA_HEADER.size])
     offset += SIDEDATA_HEADER.size
     dataoffset = SIDEDATA_HEADER.size + (SIDEDATA_ENTRY.size * nbentry)
     for i in range(nbentry):
         nextoffset = offset + SIDEDATA_ENTRY.size
-        key, size, storeddigest = SIDEDATA_ENTRY.unpack(text[offset:nextoffset])
+        key, size, storeddigest = SIDEDATA_ENTRY.unpack(blob[offset:nextoffset])
         offset = nextoffset
         # read the data associated with that entry
         nextdataoffset = dataoffset + size
-        entrytext = text[dataoffset:nextdataoffset]
+        entrytext = bytes(blob[dataoffset:nextdataoffset])
         readdigest = hashutil.sha1(entrytext).digest()
         if storeddigest != readdigest:
             raise error.SidedataHashError(key, storeddigest, readdigest)
         sidedata[key] = entrytext
         dataoffset = nextdataoffset
-    text = text[dataoffset:]
-    return text, True, sidedata
-
-
-def sidedatarawprocessor(rl, text):
-    # side data modifies rawtext and prevent rawtext hash validation
-    return False
-
-
-processors = (
-    sidedatareadprocessor,
-    sidedatawriteprocessor,
-    sidedatarawprocessor,
-)
+    return sidedata
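
Now that serialization is a pure function over a dict, the format (a 2-byte
entry count, then one >HL20s record per key, then the concatenated values) is
easy to exercise on its own; a round-trip sketch mirroring
serialize_sidedata/deserialize_sidedata::

    import hashlib
    import struct

    HEADER = struct.Struct('>H')
    ENTRY = struct.Struct('>HL20s')  # key, value length, sha1 of value

    def serialize(sidedata):
        items = sorted(sidedata.items())
        buf = [HEADER.pack(len(items))]
        buf += [ENTRY.pack(k, len(v), hashlib.sha1(v).digest())
                for k, v in items]
        buf += [v for _, v in items]
        return b''.join(buf)

    def deserialize(blob):
        (nb,) = HEADER.unpack_from(blob)
        dataoffset = HEADER.size + ENTRY.size * nb
        sidedata = {}
        for i in range(nb):
            entryoffset = HEADER.size + i * ENTRY.size
            key, size, digest = ENTRY.unpack_from(blob, entryoffset)
            value = blob[dataoffset:dataoffset + size]
            # same integrity check as the real reader
            assert hashlib.sha1(value).digest() == digest
            sidedata[key] = value
            dataoffset += size
        return sidedata

    sd = {1: b'copy source info', 2: b'other metadata'}
    assert deserialize(serialize(sd)) == sd
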
--- a/mercurial/revset.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/revset.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1826,9 +1826,9 @@
         l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
     )
     if not dest:
-        # ui.paths.getpath() explicitly tests for None, not just a boolean
+        # ui.getpath() explicitly tests for None, not just a boolean
         dest = None
-    path = repo.ui.paths.getpath(dest, default=(b'default-push', b'default'))
+    path = repo.ui.getpath(dest, default=(b'default-push', b'default'))
     if not path:
         raise error.Abort(
             _(b'default repository not configured!'),
@@ -1841,9 +1841,12 @@
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
     other = hg.peer(repo, {}, dest)
-    repo.ui.pushbuffer()
-    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
-    repo.ui.popbuffer()
+    try:
+        repo.ui.pushbuffer()
+        outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
+        repo.ui.popbuffer()
+    finally:
+        other.close()
     cl = repo.changelog
     o = {cl.rev(r) for r in outgoing.missing}
     return subset & o
--- a/mercurial/scmutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/scmutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -201,7 +201,9 @@
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
-        if not isinstance(msg, bytes):
+        if msg is None:
+            ui.error(b"\n")
+        elif not isinstance(msg, bytes):
             ui.error(b" %r\n" % (msg,))
         elif not msg:
             ui.error(_(b" empty string\n"))
@@ -229,6 +231,8 @@
             detailed_exit_code = 20
         elif isinstance(inst, error.ConfigError):
             detailed_exit_code = 30
+        elif isinstance(inst, error.HookAbort):
+            detailed_exit_code = 40
         elif isinstance(inst, error.SecurityError):
             detailed_exit_code = 150
         elif isinstance(inst, error.CanceledError):
--- a/mercurial/setdiscovery.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/setdiscovery.py	Thu Mar 18 18:24:59 2021 -0400
@@ -286,8 +286,6 @@
     ui,
     local,
     remote,
-    initialsamplesize=100,
-    fullsamplesize=200,
     abortwhenunrelated=True,
     ancestorsof=None,
     audit=None,
@@ -315,7 +313,8 @@
         ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     initial_head_exchange = ui.configbool(b'devel', b'discovery.exchange-heads')
-
+    initialsamplesize = ui.configint(b'devel', b'discovery.sample-size.initial')
+    fullsamplesize = ui.configint(b'devel', b'discovery.sample-size')
     # We also ask remote about all the local heads. That set can be arbitrarily
     # large, so we used to limit it size to `initialsamplesize`. We no longer
     # do as it proved counter productive. The skipped heads could lead to a
--- a/mercurial/shelve.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/shelve.py	Thu Mar 18 18:24:59 2021 -0400
@@ -241,7 +241,7 @@
                 bin(h) for h in d[b'nodestoremove'].split(b' ')
             ]
         except (ValueError, TypeError, KeyError) as err:
-            raise error.CorruptedState(pycompat.bytestr(err))
+            raise error.CorruptedState(stringutil.forcebytestr(err))
 
     @classmethod
     def _getversion(cls, repo):
@@ -250,7 +250,7 @@
         try:
             version = int(fp.readline().strip())
         except ValueError as err:
-            raise error.CorruptedState(pycompat.bytestr(err))
+            raise error.CorruptedState(stringutil.forcebytestr(err))
         finally:
             fp.close()
         return version
@@ -812,7 +812,7 @@
     with repo.lock():
         checkparents(repo, state)
         ms = mergestatemod.mergestate.read(repo)
-        if list(ms.unresolved()):
+        if ms.unresolvedcount():
             raise error.Abort(
                 _(b"unresolved conflicts, can't continue"),
                 hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
--- a/mercurial/simplemerge.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/simplemerge.py	Thu Mar 18 18:24:59 2021 -0400
@@ -402,31 +402,6 @@
 
         return sl
 
-    def find_unconflicted(self):
-        """Return a list of ranges in base that are not conflicted."""
-        am = mdiff.get_matching_blocks(self.basetext, self.atext)
-        bm = mdiff.get_matching_blocks(self.basetext, self.btext)
-
-        unc = []
-
-        while am and bm:
-            # there is an unconflicted block at i; how long does it
-            # extend?  until whichever one ends earlier.
-            a1 = am[0][0]
-            a2 = a1 + am[0][2]
-            b1 = bm[0][0]
-            b2 = b1 + bm[0][2]
-            i = intersect((a1, a2), (b1, b2))
-            if i:
-                unc.append(i)
-
-            if a2 < b2:
-                del am[0]
-            else:
-                del bm[0]
-
-        return unc
-
 
 def _verifytext(text, path, ui, opts):
     """verifies that text is non-binary (unless opts[text] is passed,
--- a/mercurial/sshpeer.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/sshpeer.py	Thu Mar 18 18:24:59 2021 -0400
@@ -40,7 +40,7 @@
     """display all data currently available on pipe as remote output.
 
     This is non blocking."""
-    if pipe:
+    if pipe and not pipe.closed:
         s = procutil.readpipe(pipe)
         if s:
             display = ui.warn if warn else ui.status
@@ -140,18 +140,26 @@
     def close(self):
         return self._main.close()
 
+    @property
+    def closed(self):
+        return self._main.closed
+
     def flush(self):
         return self._main.flush()
 
 
-def _cleanuppipes(ui, pipei, pipeo, pipee):
+def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
     """Clean up pipes used by an SSH connection."""
-    if pipeo:
+    didsomething = False
+    if pipeo and not pipeo.closed:
+        didsomething = True
         pipeo.close()
-    if pipei:
+    if pipei and not pipei.closed:
+        didsomething = True
         pipei.close()
 
-    if pipee:
+    if pipee and not pipee.closed:
+        didsomething = True
         # Try to read from the err descriptor until EOF.
         try:
             for l in pipee:
@@ -161,6 +169,14 @@
 
         pipee.close()
 
+    if didsomething and warn is not None:
+        # Encourage explicit close of sshpeers. Closing via __del__ is
+        # not very predictable when exceptions are thrown, which has led
+        # to deadlocks when a peer gets gc'ed in a forked process.
+        # We add our own stack trace, because the stack trace seen when
+        # called from __del__ is useless.
+        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
+
 
 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
     """Create an SSH connection to a server.
@@ -412,6 +428,7 @@
         self._pipee = stderr
         self._caps = caps
         self._autoreadstderr = autoreadstderr
+        self._initstack = b''.join(util.getstackframes(1))
 
     # Commands that have a "framed" response where the first line of the
     # response contains the length of that response.
@@ -434,7 +451,7 @@
         return True
 
     def close(self):
-        pass
+        self._cleanup()
 
     # End of ipeerconnection interface.
 
@@ -452,10 +469,11 @@
         self._cleanup()
         raise exception
 
-    def _cleanup(self):
-        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
+    def _cleanup(self, warn=None):
+        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)
 
-    __del__ = _cleanup
+    def __del__(self):
+        self._cleanup(warn=self._initstack)
 
     def _sendrequest(self, cmd, args, framed=False):
         if self.ui.debugflag and self.ui.configbool(
@@ -607,7 +625,7 @@
     try:
         protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
     except Exception:
-        _cleanuppipes(ui, stdout, stdin, stderr)
+        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
         raise
 
     if protoname == wireprototypes.SSHV1:
@@ -633,7 +651,7 @@
             autoreadstderr=autoreadstderr,
         )
     else:
-        _cleanuppipes(ui, stdout, stdin, stderr)
+        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
         raise error.RepoError(
             _(b'unknown version of SSH protocol: %s') % protoname
         )
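
The new develwarn only fires when a peer is reclaimed by the garbage
collector with its pipes still open; callers that close explicitly, as the
subrepo changes later in this patch do, never see it. A minimal usage sketch
(the URL is hypothetical)::

    peer = hg.peer(repo, {}, b'ssh://example.com/repo')
    try:
        peer.capable(b'stream')  # any wire interaction
    finally:
        peer.close()  # releases the pipes; no __del__-time develwarn
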
--- a/mercurial/statichttprepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/statichttprepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -12,6 +12,7 @@
 import errno
 
 from .i18n import _
+from .node import sha1nodeconstants
 from . import (
     branchmap,
     changelog,
@@ -172,6 +173,7 @@
         self.names = namespaces.namespaces()
         self.filtername = None
         self._extrafilterid = None
+        self._wanted_sidedata = set()
 
         try:
             requirements = set(self.vfs.read(b'requires').splitlines())
@@ -197,6 +199,8 @@
             requirements, supportedrequirements
         )
         localrepo.ensurerequirementscompatible(ui, requirements)
+        self.nodeconstants = sha1nodeconstants
+        self.nullid = self.nodeconstants.nullid
 
         # setup store
         self.store = localrepo.makestore(requirements, self.path, vfsclass)
@@ -206,7 +210,7 @@
         self._filecache = {}
         self.requirements = requirements
 
-        rootmanifest = manifest.manifestrevlog(self.svfs)
+        rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
         self.manifestlog = manifest.manifestlog(
             self.svfs, self, rootmanifest, self.narrowmatch()
         )
--- a/mercurial/store.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/store.py	Thu Mar 18 18:24:59 2021 -0400
@@ -387,13 +387,13 @@
     b'requires',
 ]
 
+REVLOG_FILES_EXT = (b'.i', b'.d', b'.n', b'.nd')
+
 
 def isrevlog(f, kind, st):
     if kind != stat.S_IFREG:
         return False
-    if f[-2:] in (b'.i', b'.d', b'.n'):
-        return True
-    return f[-3:] == b'.nd'
+    return f.endswith(REVLOG_FILES_EXT)
 
 
 class basicstore(object):
@@ -433,11 +433,15 @@
         l.sort()
         return l
 
-    def changelog(self, trypending):
-        return changelog.changelog(self.vfs, trypending=trypending)
+    def changelog(self, trypending, concurrencychecker=None):
+        return changelog.changelog(
+            self.vfs,
+            trypending=trypending,
+            concurrencychecker=concurrencychecker,
+        )
 
     def manifestlog(self, repo, storenarrowmatch):
-        rootstore = manifest.manifestrevlog(self.vfs)
+        rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
         return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
 
     def datafiles(self, matcher=None):
--- a/mercurial/streamclone.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/streamclone.py	Thu Mar 18 18:24:59 2021 -0400
@@ -20,6 +20,7 @@
     narrowspec,
     phases,
     pycompat,
+    requirements as requirementsmod,
     scmutil,
     store,
     util,
@@ -83,7 +84,7 @@
     # is advertised and contains a comma-delimited list of requirements.
     requirements = set()
     if remote.capable(b'stream'):
-        requirements.add(b'revlogv1')
+        requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
     else:
         streamreqs = remote.capable(b'streamreqs')
         # This is weird and shouldn't happen with modern servers.
--- a/mercurial/subrepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/subrepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -716,13 +716,17 @@
                     _(b'sharing subrepo %s from %s\n')
                     % (subrelpath(self), srcurl)
                 )
-                shared = hg.share(
-                    self._repo._subparent.baseui,
-                    getpeer(),
-                    self._repo.root,
-                    update=False,
-                    bookmarks=False,
-                )
+                peer = getpeer()
+                try:
+                    shared = hg.share(
+                        self._repo._subparent.baseui,
+                        peer,
+                        self._repo.root,
+                        update=False,
+                        bookmarks=False,
+                    )
+                finally:
+                    peer.close()
                 self._repo = shared.local()
             else:
                 # TODO: find a common place for this and this code in the
@@ -743,14 +747,18 @@
                     _(b'cloning subrepo %s from %s\n')
                     % (subrelpath(self), util.hidepassword(srcurl))
                 )
-                other, cloned = hg.clone(
-                    self._repo._subparent.baseui,
-                    {},
-                    getpeer(),
-                    self._repo.root,
-                    update=False,
-                    shareopts=shareopts,
-                )
+                peer = getpeer()
+                try:
+                    other, cloned = hg.clone(
+                        self._repo._subparent.baseui,
+                        {},
+                        peer,
+                        self._repo.root,
+                        update=False,
+                        shareopts=shareopts,
+                    )
+                finally:
+                    peer.close()
                 self._repo = cloned.local()
             self._initrepo(parentrepo, source, create=True)
             self._cachestorehash(srcurl)
@@ -760,7 +768,11 @@
                 % (subrelpath(self), util.hidepassword(srcurl))
             )
             cleansub = self.storeclean(srcurl)
-            exchange.pull(self._repo, getpeer())
+            peer = getpeer()
+            try:
+                exchange.pull(self._repo, peer)
+            finally:
+                peer.close()
             if cleansub:
                 # keep the repo clean after pull
                 self._cachestorehash(srcurl)
@@ -845,7 +857,10 @@
             % (subrelpath(self), util.hidepassword(dsturl))
         )
         other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
-        res = exchange.push(self._repo, other, force, newbranch=newbranch)
+        try:
+            res = exchange.push(self._repo, other, force, newbranch=newbranch)
+        finally:
+            other.close()
 
         # the repo is now clean
         self._cachestorehash(dsturl)
--- a/mercurial/subrepoutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/subrepoutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -27,8 +27,29 @@
 
 nullstate = (b'', b'', b'empty')
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Any,
+        Dict,
+        List,
+        Optional,
+        Set,
+        Tuple,
+    )
+    from . import (
+        context,
+        localrepo,
+        match as matchmod,
+        scmutil,
+        subrepo,
+        ui as uimod,
+    )
+
+    Substate = Dict[bytes, Tuple[bytes, bytes, bytes]]
+
 
 def state(ctx, ui):
+    # type: (context.changectx, uimod.ui) -> Substate
     """return a state dict, mapping subrepo paths configured in .hgsub
     to tuple: (source from .hgsub, revision from .hgsubstate, kind
     (key in types dict))
@@ -84,6 +105,7 @@
                 raise
 
     def remap(src):
+        # type: (bytes) -> bytes
         for pattern, repl in p.items(b'subpaths'):
             # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
             # does a string decode.
@@ -105,7 +127,7 @@
         return src
 
     state = {}
-    for path, src in p[b''].items():
+    for path, src in p.items(b''):  # type: bytes
         kind = b'hg'
         if src.startswith(b'['):
             if b']' not in src:
@@ -136,6 +158,7 @@
 
 
 def writestate(repo, state):
+    # type: (localrepo.localrepository, Substate) -> None
     """rewrite .hgsubstate in (outer) repo with these subrepo states"""
     lines = [
         b'%s %s\n' % (state[s][1], s)
@@ -146,6 +169,8 @@
 
 
 def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
+    # type: (localrepo.localrepository, context.workingctx, context.changectx, context.changectx, bool, Optional[Any]) -> Substate
+    # TODO: type the `labels` arg
     """delegated from merge.applyupdates: merging of .hgsubstate file
     in working context, merging context and ancestor context"""
     if mctx == actx:  # backwards?
@@ -285,6 +310,7 @@
 
 
 def precommit(ui, wctx, status, match, force=False):
+    # type: (uimod.ui, context.workingcommitctx, scmutil.status, matchmod.basematcher, bool) -> Tuple[List[bytes], Set[bytes], Substate]
     """Calculate .hgsubstate changes that should be applied before committing
 
     Returns (subs, commitsubs, newstate) where
@@ -355,6 +381,7 @@
 
 
 def reporelpath(repo):
+    # type: (localrepo.localrepository) -> bytes
     """return path to this (sub)repo as seen from outermost repo"""
     parent = repo
     while util.safehasattr(parent, b'_subparent'):
@@ -363,11 +390,13 @@
 
 
 def subrelpath(sub):
+    # type: (subrepo.abstractsubrepo) -> bytes
     """return path to this subrepo as seen from outermost repo"""
     return sub._relpath
 
 
 def _abssource(repo, push=False, abort=True):
+    # type: (localrepo.localrepository, bool, bool) -> Optional[bytes]
     """return pull/push path of repo - either based on parent repo .hgsub info
     or on the top repo config. Abort or return None if no source found."""
     if util.safehasattr(repo, b'_subparent'):
@@ -416,6 +445,7 @@
 
 
 def newcommitphase(ui, ctx):
+    # type: (uimod.ui, context.changectx) -> int
     commitphase = phases.newcommitphase(ui)
     substate = getattr(ctx, "substate", None)
     if not substate:
--- a/mercurial/tags.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/tags.py	Thu Mar 18 18:24:59 2021 -0400
@@ -494,11 +494,25 @@
     starttime = util.timer()
     fnodescache = hgtagsfnodescache(repo.unfiltered())
     cachefnode = {}
+    validated_fnodes = set()
+    unknown_entries = set()
+    flog = repo.file(b'.hgtags')
     for node in nodes:
         fnode = fnodescache.getfnode(node)
         if fnode != nullid:
+            if fnode not in validated_fnodes:
+                if flog.hasnode(fnode):
+                    validated_fnodes.add(fnode)
+                else:
+                    unknown_entries.add(node)
             cachefnode[node] = fnode
 
+    if unknown_entries:
+        fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
+        for node, fnode in pycompat.iteritems(fixed_nodemap):
+            if fnode != nullid:
+                cachefnode[node] = fnode
+
     fnodescache.write()
 
     duration = util.timer() - starttime
@@ -733,6 +747,7 @@
         if rawlen < wantedlen:
             if self._dirtyoffset is None:
                 self._dirtyoffset = rawlen
+            # TODO: zero-fill the entire record, because it's invalid, not missing?
             self._raw.extend(b'\xff' * (wantedlen - rawlen))
 
     def getfnode(self, node, computemissing=True):
@@ -740,7 +755,8 @@
 
         If the value is in the cache, the entry will be validated and returned.
         Otherwise, the filenode will be computed and returned unless
-        "computemissing" is False, in which case None will be returned without
+        "computemissing" is False.  In that case, None will be returned if
+        the entry is missing, or False if the entry is invalid, without
+        performing any potentially expensive computation.
 
         If an .hgtags does not exist at the specified revision, nullid is
@@ -771,8 +787,19 @@
         # If we get here, the entry is either missing or invalid.
 
         if not computemissing:
+            if record != _fnodesmissingrec:
+                return False
             return None
 
+        fnode = self._computefnode(node)
+        self._writeentry(offset, properprefix, fnode)
+        return fnode
+
+    def _computefnode(self, node):
+        """Finds the tag filenode for a node which is missing or invalid
+        in cache"""
+        ctx = self._repo[node]
+        rev = ctx.rev()
         fnode = None
         cl = self._repo.changelog
         p1rev, p2rev = cl._uncheckedparentrevs(rev)
@@ -788,7 +815,7 @@
                 # we cannot rely on readfast because we don't know against what
                 # parent the readfast delta is computed
                 p1fnode = None
-        if p1fnode is not None:
+        if p1fnode:
             mctx = ctx.manifestctx()
             fnode = mctx.readfast().get(b'.hgtags')
             if fnode is None:
@@ -800,8 +827,6 @@
             except error.LookupError:
                 # No .hgtags file on this revision.
                 fnode = nullid
-
-        self._writeentry(offset, properprefix, fnode)
         return fnode
 
     def setfnode(self, node, fnode):
@@ -815,6 +840,21 @@
 
         self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
 
+    def refresh_invalid_nodes(self, nodes):
+        """recomputes file nodes for a given set of nodes which has unknown
+        filenodes for them in the cache
+        Also updates the in-memory cache with the correct filenode.
+        Caller needs to take care about calling `.write()` so that updates are
+        persisted.
+        Returns a map {node: recomputed fnode}
+        """
+        fixed_nodemap = {}
+        for node in nodes:
+            fnode = self._computefnode(node)
+            fixed_nodemap[node] = fnode
+            self.setfnode(node, fnode)
+        return fixed_nodemap
+
     def _writeentry(self, offset, prefix, fnode):
         # Slices on array instances only accept other array.
         entry = bytearray(prefix + fnode)
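
The fnodes cache addressed by `_writeentry` is an array of fixed 24-byte
records indexed by changelog revision (a 4-byte node prefix followed by the
20-byte filenode), which is what allows seeking straight to
`rev * _fnodesrecsize`. A toy model of that layout::

    RECSIZE = 4 + 20  # node prefix + fnode

    def write_record(raw, rev, node, fnode):
        offset = rev * RECSIZE
        if len(raw) < offset:
            # grow with invalid markers so skipped slots stay detectable
            raw.extend(b'\xff' * (offset - len(raw)))
        raw[offset:offset + RECSIZE] = node[:4] + fnode

    raw = bytearray()
    write_record(raw, 2, b'\x11' * 20, b'\x22' * 20)
    assert len(raw) == 3 * RECSIZE
    assert raw[:RECSIZE] == b'\xff' * RECSIZE  # revs 0 and 1 left invalid
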
--- a/mercurial/templater.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/templater.py	Thu Mar 18 18:24:59 2021 -0400
@@ -891,7 +891,7 @@
         fp = _open_mapfile(path)
         cache, tmap, aliases = _readmapfile(fp, path)
 
-    for key, val in conf[b'templates'].items():
+    for key, val in conf.items(b'templates'):
         if not val:
             raise error.ParseError(
                 _(b'missing value'), conf.source(b'templates', key)
@@ -904,7 +904,7 @@
             cache[key] = unquotestring(val)
         elif key != b'__base__':
             tmap[key] = os.path.join(base, val)
-    aliases.extend(conf[b'templatealias'].items())
+    aliases.extend(conf.items(b'templatealias'))
     return cache, tmap, aliases
 
 
--- a/mercurial/testing/storage.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/testing/storage.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1129,12 +1129,13 @@
         with self._maketransactionfn() as tr:
             nodes = []
 
-            def onchangeset(cl, node):
+            def onchangeset(cl, rev):
+                node = cl.node(rev)
                 nodes.append(node)
                 cb(cl, node)
 
-            def ondupchangeset(cl, node):
-                nodes.append(node)
+            def ondupchangeset(cl, rev):
+                nodes.append(cl.node(rev))
 
             f.addgroup(
                 [],
@@ -1157,18 +1158,19 @@
         f = self._makefilefn()
 
         deltas = [
-            (node0, nullid, nullid, nullid, nullid, delta0, 0),
+            (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
         ]
 
         with self._maketransactionfn() as tr:
             nodes = []
 
-            def onchangeset(cl, node):
+            def onchangeset(cl, rev):
+                node = cl.node(rev)
                 nodes.append(node)
                 cb(cl, node)
 
-            def ondupchangeset(cl, node):
-                nodes.append(node)
+            def ondupchangeset(cl, rev):
+                nodes.append(cl.node(rev))
 
             f.addgroup(
                 deltas,
@@ -1212,13 +1214,15 @@
         for i, fulltext in enumerate(fulltexts):
             delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
 
-            deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
+            deltas.append(
+                (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+            )
 
         with self._maketransactionfn() as tr:
             newnodes = []
 
-            def onchangeset(cl, node):
-                newnodes.append(node)
+            def onchangeset(cl, rev):
+                newnodes.append(cl.node(rev))
 
             f.addgroup(
                 deltas,
@@ -1260,7 +1264,9 @@
             )
 
         delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
-        deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]
+        deltas = [
+            (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+        ]
 
         with self._maketransactionfn() as tr:
             with self.assertRaises(error.CensoredBaseError):
--- a/mercurial/ui.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/ui.py	Thu Mar 18 18:24:59 2021 -0400
@@ -302,6 +302,11 @@
                 if k in self.environ:
                     self._exportableenviron[k] = self.environ[k]
 
+    def _new_source(self):
+        self._ocfg.new_source()
+        self._tcfg.new_source()
+        self._ucfg.new_source()
+
     @classmethod
     def load(cls):
         """Create a ui and load global and user configs"""
@@ -313,6 +318,7 @@
             elif t == b'resource':
                 u.read_resource_config(f, trust=True)
             elif t == b'items':
+                u._new_source()
                 sections = set()
                 for section, name, value, source in f:
                     # do not set u._ocfg
@@ -325,6 +331,7 @@
             else:
                 raise error.ProgrammingError(b'unknown rctype: %s' % t)
         u._maybetweakdefaults()
+        u._new_source()  # anything after that is a different level
         return u
 
     def _maybetweakdefaults(self):
@@ -554,7 +561,7 @@
                     p = util.expandpath(p)
                     if not util.hasscheme(p) and not os.path.isabs(p):
                         p = os.path.normpath(os.path.join(root, p))
-                    c.set(b"paths", n, p)
+                    c.alter(b"paths", n, p)
 
         if section in (None, b'ui'):
             # update ui options
@@ -655,11 +662,18 @@
             msg %= (section, name, pycompat.bytestr(default))
             self.develwarn(msg, 2, b'warn-config-default')
 
+        candidates = []
+        config = self._data(untrusted)
         for s, n in alternates:
-            candidate = self._data(untrusted).get(s, n, None)
+            candidate = config.get(s, n, None)
             if candidate is not None:
-                value = candidate
-                break
+                candidates.append((s, n, candidate))
+        if candidates:
+
+            def level(x):
+                return config.level(x[0], x[1])
+
+            value = max(candidates, key=level)[2]
 
         if self.debugflag and not untrusted and self._reportuntrusted:
             for s, n in alternates:
@@ -1017,7 +1031,7 @@
     def expandpath(self, loc, default=None):
         """Return repository location relative to cwd or from [paths]"""
         try:
-            p = self.paths.getpath(loc)
+            p = self.getpath(loc)
             if p:
                 return p.rawloc
         except error.RepoError:
@@ -1025,7 +1039,7 @@
 
         if default:
             try:
-                p = self.paths.getpath(default)
+                p = self.getpath(default)
                 if p:
                     return p.rawloc
             except error.RepoError:
@@ -1037,6 +1051,13 @@
     def paths(self):
         return paths(self)
 
+    def getpath(self, *args, **kwargs):
+        """see paths.getpath for details
+
+        This method exists because `getpath` needs a ui for potential warning messages.
+        """
+        return self.paths.getpath(self, *args, **kwargs)
+
     @property
     def fout(self):
         return self._fout
@@ -2169,14 +2190,18 @@
     def __init__(self, ui):
         dict.__init__(self)
 
+        _path, base_sub_options = ui.configsuboptions(b'paths', b'*')
         for name, loc in ui.configitems(b'paths', ignoresub=True):
             # No location is the same as not existing.
             if not loc:
                 continue
             loc, sub = ui.configsuboptions(b'paths', name)
-            self[name] = path(ui, name, rawloc=loc, suboptions=sub)
+            sub_opts = base_sub_options.copy()
+            sub_opts.update(sub)
+            self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)
+        self._default_sub_opts = base_sub_options
 
-    def getpath(self, name, default=None):
+    def getpath(self, ui, name, default=None):
         """Return a ``path`` from a string, falling back to default.
 
         ``name`` can be a named path or locations. Locations are filesystem
@@ -2208,8 +2233,10 @@
         except KeyError:
             # Try to resolve as a local path or URI.
             try:
-                # We don't pass sub-options in, so no need to pass ui instance.
-                return path(None, None, rawloc=name)
+                # we pass the ui instance as warnings might need to be issued
+                return path(
+                    ui, None, rawloc=name, suboptions=self._default_sub_opts
+                )
             except ValueError:
                 raise error.RepoError(_(b'repository %s does not exist') % name)
 
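
The alternates lookup above no longer takes the first configured name; every
configured candidate is kept and the one set at the highest configuration
level (the most recently read source) wins. A toy model of that selection,
with a plain dict standing in for `config.level()` (the data is
hypothetical)::

    def pick(candidates, levels):
        # candidates: [(section, name, value)]
        # on ties, max() keeps the first candidate, i.e. the earlier alternate
        return max(candidates, key=lambda c: levels[(c[0], c[1])])[2]

    candidates = [(b'ui', b'old-name', b'from-system-rc'),
                  (b'ui', b'new-name', b'from-user-rc')]
    levels = {(b'ui', b'old-name'): 0, (b'ui', b'new-name'): 1}
    assert pick(candidates, levels) == b'from-user-rc'
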
--- a/mercurial/unionrepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/unionrepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -128,6 +128,7 @@
         deltas,
         linkmapper,
         transaction,
+        alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
         maybemissingparents=False,
@@ -152,9 +153,9 @@
 
 
 class unionmanifest(unionrevlog, manifest.manifestrevlog):
-    def __init__(self, opener, opener2, linkmapper):
-        manifest.manifestrevlog.__init__(self, opener)
-        manifest2 = manifest.manifestrevlog(opener2)
+    def __init__(self, nodeconstants, opener, opener2, linkmapper):
+        manifest.manifestrevlog.__init__(self, nodeconstants, opener)
+        manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
         unionrevlog.__init__(
             self, opener, self.indexfile, manifest2, linkmapper
         )
@@ -204,7 +205,10 @@
     @localrepo.unfilteredpropertycache
     def manifestlog(self):
         rootstore = unionmanifest(
-            self.svfs, self.repo2.svfs, self.unfiltered()._clrev
+            self.nodeconstants,
+            self.svfs,
+            self.repo2.svfs,
+            self.unfiltered()._clrev,
         )
         return manifest.manifestlog(
             self.svfs, self, rootstore, self.narrowmatch()
--- a/mercurial/upgrade.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/upgrade.py	Thu Mar 18 18:24:59 2021 -0400
@@ -118,6 +118,7 @@
         up_actions,
         removed_actions,
         revlogs,
+        backup,
     )
 
     if not run:
@@ -215,12 +216,6 @@
                 backuppath = upgrade_engine.upgrade(
                     ui, repo, dstrepo, upgrade_op
                 )
-            if not backup:
-                ui.status(
-                    _(b'removing old repository content %s\n') % backuppath
-                )
-                repo.vfs.rmtree(backuppath, forcibly=True)
-                backuppath = None
 
         finally:
             ui.status(_(b'removing temporary repository %s\n') % tmppath)
--- a/mercurial/upgrade_utils/actions.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/upgrade_utils/actions.py	Thu Mar 18 18:24:59 2021 -0400
@@ -20,7 +20,7 @@
 
 # list of requirements that request a clone of all revlog if added/removed
 RECLONES_REQUIREMENTS = {
-    b'generaldelta',
+    requirements.GENERALDELTA_REQUIREMENT,
     requirements.SPARSEREVLOG_REQUIREMENT,
 }
 
@@ -66,6 +66,18 @@
     postdowngrademessage
        Message intended for humans which will be shown post an upgrade
        operation in which this improvement was removed
+
+    touches_filelogs (bool)
+        Whether this improvement touches filelogs
+
+    touches_manifests (bool)
+        Whether this improvement touches manifests
+
+    touches_changelog (bool)
+        Whether this improvement touches changelog
+
+    touches_requirements (bool)
+        Whether this improvement changes repository requirements
     """
 
     def __init__(self, name, type, description, upgrademessage):
@@ -75,6 +87,12 @@
         self.upgrademessage = upgrademessage
         self.postupgrademessage = None
         self.postdowngrademessage = None
+        # By default for now, we assume every improvement touches
+        # all the things
+        self.touches_filelogs = True
+        self.touches_manifests = True
+        self.touches_changelog = True
+        self.touches_requirements = True
 
     def __eq__(self, other):
         if not isinstance(other, improvement):
@@ -128,6 +146,12 @@
     # operation in which this improvement was removed
     postdowngrademessage = None
 
+    # By default for now, we assume every improvement touches all the things
+    touches_filelogs = True
+    touches_manifests = True
+    touches_changelog = True
+    touches_requirements = True
+
     def __init__(self):
         raise NotImplementedError()
 
@@ -173,7 +197,7 @@
 class fncache(requirementformatvariant):
     name = b'fncache'
 
-    _requirement = b'fncache'
+    _requirement = requirements.FNCACHE_REQUIREMENT
 
     default = True
 
@@ -193,7 +217,7 @@
 class dotencode(requirementformatvariant):
     name = b'dotencode'
 
-    _requirement = b'dotencode'
+    _requirement = requirements.DOTENCODE_REQUIREMENT
 
     default = True
 
@@ -212,7 +236,7 @@
 class generaldelta(requirementformatvariant):
     name = b'generaldelta'
 
-    _requirement = b'generaldelta'
+    _requirement = requirements.GENERALDELTA_REQUIREMENT
 
     default = True
 
@@ -267,6 +291,12 @@
         b' New shares will be created in safe mode.'
     )
 
+    # upgrade only needs to change the requirements
+    touches_filelogs = False
+    touches_manifests = False
+    touches_changelog = False
+    touches_requirements = True
+
 
 @registerformatvariant
 class sparserevlog(requirementformatvariant):
@@ -295,22 +325,6 @@
 
 
 @registerformatvariant
-class sidedata(requirementformatvariant):
-    name = b'sidedata'
-
-    _requirement = requirements.SIDEDATA_REQUIREMENT
-
-    default = False
-
-    description = _(
-        b'Allows storage of extra data alongside a revision, '
-        b'unlocking various caching options.'
-    )
-
-    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
-
-
-@registerformatvariant
 class persistentnodemap(requirementformatvariant):
     name = b'persistent-nodemap'
 
@@ -341,6 +355,15 @@
 
 
 @registerformatvariant
+class revlogv2(requirementformatvariant):
+    name = b'revlog-v2'
+    _requirement = requirements.REVLOGV2_REQUIREMENT
+    default = False
+    description = _(b'Version 2 of the revlog.')
+    upgrademessage = _(b'very experimental')
+
+
+@registerformatvariant
 class removecldeltachain(formatvariant):
     name = b'plain-cl-delta'
 
@@ -626,6 +649,7 @@
         upgrade_actions,
         removed_actions,
         revlogs_to_process,
+        backup_store,
     ):
         self.ui = ui
         self.new_requirements = new_requirements
@@ -670,6 +694,75 @@
             b're-delta-multibase' in self._upgrade_actions_names
         )
 
+        # should this operation create a backup of the store
+        self.backup_store = backup_store
+
+        # whether the operation touches different revlogs at all or not
+        self.touches_filelogs = self._touches_filelogs()
+        self.touches_manifests = self._touches_manifests()
+        self.touches_changelog = self._touches_changelog()
+        # whether the operation touches requirements file or not
+        self.touches_requirements = self._touches_requirements()
+        self.touches_store = (
+            self.touches_filelogs
+            or self.touches_manifests
+            or self.touches_changelog
+        )
+        # whether the operation only touches the repository requirements
+        self.requirements_only = (
+            self.touches_requirements and not self.touches_store
+        )
+
+    def _touches_filelogs(self):
+        for a in self.upgrade_actions:
+            # in optimisations, we re-process the revlogs again
+            if a.type == OPTIMISATION:
+                return True
+            elif a.touches_filelogs:
+                return True
+        for a in self.removed_actions:
+            if a.touches_filelogs:
+                return True
+        return False
+
+    def _touches_manifests(self):
+        for a in self.upgrade_actions:
+            # in optimisations, we re-process the revlogs again
+            if a.type == OPTIMISATION:
+                return True
+            elif a.touches_manifests:
+                return True
+        for a in self.removed_actions:
+            if a.touches_manifests:
+                return True
+        return False
+
+    def _touches_changelog(self):
+        for a in self.upgrade_actions:
+            # in optimisations, we re-process the revlogs again
+            if a.type == OPTIMISATION:
+                return True
+            elif a.touches_changelog:
+                return True
+        for a in self.removed_actions:
+            if a.touches_changelog:
+                return True
+        return False
+
+    def _touches_requirements(self):
+        for a in self.upgrade_actions:
+            # optimisations are used to re-process revlogs and do not result
+            # in a requirement being added or removed
+            if a.type == OPTIMISATION:
+                pass
+            elif a.touches_requirements:
+                return True
+        for a in self.removed_actions:
+            if a.touches_requirements:
+                return True
+
+        return False
+
     def _write_labeled(self, l, label):
         """
         Utility function to aid writing of a list under one label
@@ -757,9 +850,7 @@
     """
     return {
         # Introduced in Mercurial 0.9.2.
-        b'revlogv1',
-        # Introduced in Mercurial 0.9.2.
-        b'store',
+        requirements.STORE_REQUIREMENT,
     }
 
 
@@ -781,9 +872,21 @@
     }
 
 
+def check_revlog_version(reqs):
+    """Check that the requirements contain at least one Revlog version"""
+    all_revlogs = {
+        requirements.REVLOGV1_REQUIREMENT,
+        requirements.REVLOGV2_REQUIREMENT,
+    }
+    if not all_revlogs.intersection(reqs):
+        msg = _(b'cannot upgrade repository; missing a revlog version')
+        raise error.Abort(msg)
+
+
 def check_source_requirements(repo):
     """Ensure that no existing requirements prevent the repository upgrade"""
 
+    check_revlog_version(repo.requirements)
     required = requiredsourcerequirements(repo)
     missingreqs = required - repo.requirements
     if missingreqs:
@@ -815,6 +918,8 @@
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
+        requirements.REVLOGV2_REQUIREMENT,
+        requirements.REVLOGV1_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -834,16 +939,17 @@
     Extensions should monkeypatch this to add their custom requirements.
     """
     supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
-        b'revlogv1',
-        b'store',
+        requirements.DOTENCODE_REQUIREMENT,
+        requirements.FNCACHE_REQUIREMENT,
+        requirements.GENERALDELTA_REQUIREMENT,
+        requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
+        requirements.STORE_REQUIREMENT,
         requirements.SPARSEREVLOG_REQUIREMENT,
         requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
+        requirements.REVLOGV2_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -865,14 +971,16 @@
     future, unknown requirements from accidentally being added.
     """
     supported = {
-        b'dotencode',
-        b'fncache',
-        b'generaldelta',
+        requirements.DOTENCODE_REQUIREMENT,
+        requirements.FNCACHE_REQUIREMENT,
+        requirements.GENERALDELTA_REQUIREMENT,
         requirements.SPARSEREVLOG_REQUIREMENT,
         requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
+        requirements.REVLOGV1_REQUIREMENT,
+        requirements.REVLOGV2_REQUIREMENT,
     }
     for name in compression.compengines:
         engine = compression.compengines[name]
@@ -885,7 +993,7 @@
 
 def check_requirements_changes(repo, new_reqs):
     old_reqs = repo.requirements
-
+    check_revlog_version(repo.requirements)
     support_removal = supportremovedrequirements(repo)
     no_remove_reqs = old_reqs - new_reqs - support_removal
     if no_remove_reqs:
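
The new check_revlog_version() guard is a plain set-intersection test, run
both before an upgrade starts and again when the requirement changes are
validated. A minimal standalone sketch of the same behaviour (plain strings
stand in for the requirements.*_REQUIREMENT constants; the REVLOGV2 spelling
below is hypothetical):

    # sketch only: stand-ins for mercurial's requirement constants
    REVLOGV1 = b'revlogv1'
    REVLOGV2 = b'revlogv2'  # hypothetical spelling, for illustration

    def check_revlog_version(reqs):
        """Refuse to proceed if no revlog version is named at all."""
        if not {REVLOGV1, REVLOGV2}.intersection(reqs):
            raise ValueError('cannot upgrade; missing a revlog version')

    check_revlog_version({b'store', REVLOGV1})  # passes silently
    try:
        check_revlog_version({b'store'})
    except ValueError:
        pass  # a requirements set without any revlog version is rejected
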
--- a/mercurial/upgrade_utils/engine.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/upgrade_utils/engine.py	Thu Mar 18 18:24:59 2021 -0400
@@ -24,6 +24,7 @@
     util,
     vfs as vfsmod,
 )
+from ..revlogutils import nodemap
 
 
 def _revlogfrompath(repo, path):
@@ -35,7 +36,9 @@
         return changelog.changelog(repo.svfs)
     elif path.endswith(b'00manifest.i'):
         mandir = path[: -len(b'00manifest.i')]
-        return manifest.manifestrevlog(repo.svfs, tree=mandir)
+        return manifest.manifestrevlog(
+            repo.nodeconstants, repo.svfs, tree=mandir
+        )
     else:
         # reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])
@@ -412,7 +415,10 @@
     """
     # TODO: don't blindly rename everything in store
     # There can be upgrades where store is not touched at all
-    util.rename(currentrepo.spath, backupvfs.join(b'store'))
+    if upgrade_op.backup_store:
+        util.rename(currentrepo.spath, backupvfs.join(b'store'))
+    else:
+        currentrepo.vfs.rmtree(b'store', forcibly=True)
     util.rename(upgradedrepo.spath, currentrepo.spath)
 
 
@@ -436,6 +442,8 @@
     """
     assert srcrepo.currentwlock()
     assert dstrepo.currentwlock()
+    backuppath = None
+    backupvfs = None
 
     ui.status(
         _(
@@ -444,79 +452,136 @@
         )
     )
 
-    with dstrepo.transaction(b'upgrade') as tr:
-        _clonerevlogs(
-            ui,
-            srcrepo,
-            dstrepo,
-            tr,
-            upgrade_op,
+    if upgrade_op.requirements_only:
+        ui.status(_(b'upgrading repository requirements\n'))
+        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+    # if there is only one action and that is the persistent nodemap upgrade,
+    # directly write the nodemap file and update the requirements instead of
+    # going through the whole cloning process
+    elif (
+        len(upgrade_op.upgrade_actions) == 1
+        and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
+        and not upgrade_op.removed_actions
+    ):
+        ui.status(
+            _(b'upgrading repository to use persistent nodemap feature\n')
+        )
+        with srcrepo.transaction(b'upgrade') as tr:
+            unfi = srcrepo.unfiltered()
+            cl = unfi.changelog
+            nodemap.persist_nodemap(tr, cl, force=True)
+            # we want to operate directly on the underlying revlog to
+            # force-create a nodemap file. This is fine since this is upgrade
+            # code and it heavily relies on the repository being revlog-based,
+            # hence accessing private attributes can be justified
+            nodemap.persist_nodemap(
+                tr, unfi.manifestlog._rootstore._revlog, force=True
+            )
+        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+    elif (
+        len(upgrade_op.removed_actions) == 1
+        and [
+            x
+            for x in upgrade_op.removed_actions
+            if x.name == b'persistent-nodemap'
+        ]
+        and not upgrade_op.upgrade_actions
+    ):
+        ui.status(
+            _(b'downgrading repository to not use persistent nodemap feature\n')
+        )
+        with srcrepo.transaction(b'upgrade') as tr:
+            unfi = srcrepo.unfiltered()
+            cl = unfi.changelog
+            nodemap.delete_nodemap(tr, srcrepo, cl)
+            # see the comment ~20 lines above about accessing private attributes
+            nodemap.delete_nodemap(
+                tr, srcrepo, unfi.manifestlog._rootstore._revlog
+            )
+        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+    else:
+        with dstrepo.transaction(b'upgrade') as tr:
+            _clonerevlogs(
+                ui,
+                srcrepo,
+                dstrepo,
+                tr,
+                upgrade_op,
+            )
+
+        # Now copy other files in the store directory.
+        for p in _files_to_copy_post_revlog_clone(srcrepo):
+            srcrepo.ui.status(_(b'copying %s\n') % p)
+            src = srcrepo.store.rawvfs.join(p)
+            dst = dstrepo.store.rawvfs.join(p)
+            util.copyfile(src, dst, copystat=True)
+
+        finishdatamigration(ui, srcrepo, dstrepo, requirements)
+
+        ui.status(_(b'data fully upgraded in a temporary repository\n'))
+
+        if upgrade_op.backup_store:
+            backuppath = pycompat.mkdtemp(
+                prefix=b'upgradebackup.', dir=srcrepo.path
+            )
+            backupvfs = vfsmod.vfs(backuppath)
+
+            # Make a backup of the requires file first, as it is the first to be modified.
+            util.copyfile(
+                srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
+            )
+
+        # We install an arbitrary requirement that clients must not support
+        # as a mechanism to lock out new clients during the data swap. This is
+        # better than allowing a client to continue while the repository is in
+        # an inconsistent state.
+        ui.status(
+            _(
+                b'marking source repository as being upgraded; clients will be '
+                b'unable to read from repository\n'
+            )
+        )
+        scmutil.writereporequirements(
+            srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
         )
 
-    # Now copy other files in the store directory.
-    for p in _files_to_copy_post_revlog_clone(srcrepo):
-        srcrepo.ui.status(_(b'copying %s\n') % p)
-        src = srcrepo.store.rawvfs.join(p)
-        dst = dstrepo.store.rawvfs.join(p)
-        util.copyfile(src, dst, copystat=True)
-
-    finishdatamigration(ui, srcrepo, dstrepo, requirements)
-
-    ui.status(_(b'data fully upgraded in a temporary repository\n'))
-
-    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
-    backupvfs = vfsmod.vfs(backuppath)
-
-    # Make a backup of requires file first, as it is the first to be modified.
-    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
-
-    # We install an arbitrary requirement that clients must not support
-    # as a mechanism to lock out new clients during the data swap. This is
-    # better than allowing a client to continue while the repository is in
-    # an inconsistent state.
-    ui.status(
-        _(
-            b'marking source repository as being upgraded; clients will be '
-            b'unable to read from repository\n'
-        )
-    )
-    scmutil.writereporequirements(
-        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
-    )
+        ui.status(_(b'starting in-place swap of repository data\n'))
+        if upgrade_op.backup_store:
+            ui.status(
+                _(b'replaced files will be backed up at %s\n') % backuppath
+            )
 
-    ui.status(_(b'starting in-place swap of repository data\n'))
-    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
-
-    # Now swap in the new store directory. Doing it as a rename should make
-    # the operation nearly instantaneous and atomic (at least in well-behaved
-    # environments).
-    ui.status(_(b'replacing store...\n'))
-    tstart = util.timer()
-    _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
-    elapsed = util.timer() - tstart
-    ui.status(
-        _(
-            b'store replacement complete; repository was inconsistent for '
-            b'%0.1fs\n'
+        # Now swap in the new store directory. Doing it as a rename should make
+        # the operation nearly instantaneous and atomic (at least in well-behaved
+        # environments).
+        ui.status(_(b'replacing store...\n'))
+        tstart = util.timer()
+        _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
+        elapsed = util.timer() - tstart
+        ui.status(
+            _(
+                b'store replacement complete; repository was inconsistent for '
+                b'%0.1fs\n'
+            )
+            % elapsed
         )
-        % elapsed
-    )
 
-    # We first write the requirements file. Any new requirements will lock
-    # out legacy clients.
-    ui.status(
-        _(
-            b'finalizing requirements file and making repository readable '
-            b'again\n'
+        # We first write the requirements file. Any new requirements will lock
+        # out legacy clients.
+        ui.status(
+            _(
+                b'finalizing requirements file and making repository readable '
+                b'again\n'
+            )
         )
-    )
-    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
 
-    # The lock file from the old store won't be removed because nothing has a
-    # reference to its new location. So clean it up manually. Alternatively, we
-    # could update srcrepo.svfs and other variables to point to the new
-    # location. This is simpler.
-    assert backupvfs is not None  # help pytype
-    backupvfs.unlink(b'store/lock')
+        if upgrade_op.backup_store:
+            # The lock file from the old store won't be removed because nothing has a
+            # reference to its new location. So clean it up manually. Alternatively, we
+            # could update srcrepo.svfs and other variables to point to the new
+            # location. This is simpler.
+            assert backupvfs is not None  # help pytype
+            backupvfs.unlink(b'store/lock')
 
     return backuppath
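
The restructured upgrade() above now dispatches between three strategies: a
requirements-only rewrite, an in-place fast path when the only action is the
persistent nodemap, and the full clone-and-swap otherwise. A condensed sketch
of that dispatch, using a hypothetical Op stand-in for upgrade_op:

    class Op(object):
        # hypothetical stand-in mirroring the upgrade_op fields used above
        def __init__(self, requirements_only, actions, names, removed):
            self.requirements_only = requirements_only
            self.upgrade_actions = actions
            self._upgrade_actions_names = names
            self.removed_actions = removed

    def choose_strategy(op):
        if op.requirements_only:
            return 'rewrite .hg/requires only'
        if (len(op.upgrade_actions) == 1
                and b'persistent-nodemap' in op._upgrade_actions_names
                and not op.removed_actions):
            return 'write nodemap files in place, then rewrite requires'
        return 'clone revlogs into a temporary repo, then swap the store'

    op = Op(False, ['nodemap-action'], {b'persistent-nodemap'}, [])
    assert choose_strategy(op).startswith('write nodemap')
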
--- a/mercurial/util.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/util.py	Thu Mar 18 18:24:59 2021 -0400
@@ -59,6 +59,16 @@
     stringutil,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Iterator,
+        List,
+        Optional,
+        Tuple,
+        Union,
+    )
+
+
 base85 = policy.importmod('base85')
 osutil = policy.importmod('osutil')
 
@@ -133,6 +143,7 @@
 
 
 def setumask(val):
+    # type: (int) -> None
     ''' updates the umask. used by chg server '''
     if pycompat.iswindows:
         return
@@ -307,7 +318,7 @@
 
 
 try:
-    buffer = buffer
+    buffer = buffer  # pytype: disable=name-error
 except NameError:
 
     def buffer(sliceable, offset=0, length=None):
@@ -1833,6 +1844,7 @@
 
 
 def pathto(root, n1, n2):
+    # type: (bytes, bytes, bytes) -> bytes
     """return the relative path from one place to another.
     root should use os.sep to separate directories
     n1 should use os.sep to separate directories
@@ -2017,6 +2029,7 @@
 
 
 def checkwinfilename(path):
+    # type: (bytes) -> Optional[bytes]
     r"""Check that the base-relative path is a valid filename on Windows.
     Returns None if the path is ok, or a UI string describing the problem.
 
@@ -2111,6 +2124,7 @@
 
 
 def readlock(pathname):
+    # type: (bytes) -> bytes
     try:
         return readlink(pathname)
     except OSError as why:
@@ -2134,6 +2148,7 @@
 
 
 def fscasesensitive(path):
+    # type: (bytes) -> bool
     """
     Return true if the given path is on a case-sensitive filesystem
 
@@ -2215,6 +2230,7 @@
 
 
 def fspath(name, root):
+    # type: (bytes, bytes) -> bytes
     """Get name in the case stored in the filesystem
 
     The name should be relative to root, and be normcase-ed for efficiency.
@@ -2259,6 +2275,7 @@
 
 
 def checknlink(testfile):
+    # type: (bytes) -> bool
     '''check whether hardlink count reporting works properly'''
 
     # testfile may be open, so we need a separate file for checking to
@@ -2292,8 +2309,9 @@
 
 
 def endswithsep(path):
+    # type: (bytes) -> bool
     '''Check path ends with os.sep or os.altsep.'''
-    return (
+    return bool(  # help pytype
         path.endswith(pycompat.ossep)
         or pycompat.osaltsep
         and path.endswith(pycompat.osaltsep)
@@ -2301,6 +2319,7 @@
 
 
 def splitpath(path):
+    # type: (bytes) -> List[bytes]
     """Split path by os.sep.
     Note that this function does not use os.altsep because this is
     an alternative of simple "xxx.split(os.sep)".
@@ -2529,6 +2548,7 @@
 
 
 def unlinkpath(f, ignoremissing=False, rmdir=True):
+    # type: (bytes, bool, bool) -> None
     """unlink and remove the directory if it is empty"""
     if ignoremissing:
         tryunlink(f)
@@ -2543,6 +2563,7 @@
 
 
 def tryunlink(f):
+    # type: (bytes) -> None
     """Attempt to remove a file, ignoring ENOENT errors."""
     try:
         unlink(f)
@@ -2552,6 +2573,7 @@
 
 
 def makedirs(name, mode=None, notindexed=False):
+    # type: (bytes, Optional[int], bool) -> None
     """recursive directory creation with parent mode inheritance
 
     Newly created directories are marked as "not to be indexed by
@@ -2581,16 +2603,19 @@
 
 
 def readfile(path):
+    # type: (bytes) -> bytes
     with open(path, b'rb') as fp:
         return fp.read()
 
 
 def writefile(path, text):
+    # type: (bytes, bytes) -> None
     with open(path, b'wb') as fp:
         fp.write(text)
 
 
 def appendfile(path, text):
+    # type: (bytes, bytes) -> None
     with open(path, b'ab') as fp:
         fp.write(text)
 
@@ -2752,6 +2777,7 @@
 
 
 def processlinerange(fromline, toline):
+    # type: (int, int) -> Tuple[int, int]
     """Check that linerange <fromline>:<toline> makes sense and return a
     0-based range.
 
@@ -2811,10 +2837,12 @@
 
 
 def tolf(s):
+    # type: (bytes) -> bytes
     return _eolre.sub(b'\n', s)
 
 
 def tocrlf(s):
+    # type: (bytes) -> bytes
     return _eolre.sub(b'\r\n', s)
 
 
@@ -2878,12 +2906,14 @@
 
 
 def iterlines(iterator):
+    # type: (Iterator[bytes]) -> Iterator[bytes]
     for chunk in iterator:
         for line in chunk.splitlines():
             yield line
 
 
 def expandpath(path):
+    # type: (bytes) -> bytes
     return os.path.expanduser(os.path.expandvars(path))
 
 
@@ -2914,6 +2944,7 @@
 
 
 def getport(port):
+    # type: (Union[bytes, int]) -> int
     """Return the port for a given network service.
 
     If port is an integer, it's returned as is. If it's a string, it's
@@ -3012,6 +3043,7 @@
     _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
 
     def __init__(self, path, parsequery=True, parsefragment=True):
+        # type: (bytes, bool, bool) -> None
         # We slowly chomp away at path until we have only the path left
         self.scheme = self.user = self.passwd = self.host = None
         self.port = self.path = self.query = self.fragment = None
@@ -3239,6 +3271,7 @@
         return False
 
     def localpath(self):
+        # type: () -> bytes
         if self.scheme == b'file' or self.scheme == b'bundle':
             path = self.path or b'/'
             # For Windows, we need to promote hosts containing drive
@@ -3262,18 +3295,22 @@
 
 
 def hasscheme(path):
-    return bool(url(path).scheme)
+    # type: (bytes) -> bool
+    return bool(url(path).scheme)  # cast to help pytype
 
 
 def hasdriveletter(path):
-    return path and path[1:2] == b':' and path[0:1].isalpha()
+    # type: (bytes) -> bool
+    return bool(path) and path[1:2] == b':' and path[0:1].isalpha()
 
 
 def urllocalpath(path):
+    # type: (bytes) -> bytes
     return url(path, parsequery=False, parsefragment=False).localpath()
 
 
 def checksafessh(path):
+    # type: (bytes) -> None
     """check if a path / url is a potentially unsafe ssh exploit (SEC)
 
     This is a sanity check for ssh urls. ssh will parse the first item as
@@ -3291,6 +3328,7 @@
 
 
 def hidepassword(u):
+    # type: (bytes) -> bytes
     '''hide user credential in a url string'''
     u = url(u)
     if u.passwd:
@@ -3299,6 +3337,7 @@
 
 
 def removeauth(u):
+    # type: (bytes) -> bytes
     '''remove all authentication information from a url string'''
     u = url(u)
     u.user = u.passwd = None
@@ -3404,6 +3443,7 @@
 
 
 def sizetoint(s):
+    # type: (bytes) -> int
     """Convert a space specifier to a byte count.
 
     >>> sizetoint(b'30')
@@ -3629,6 +3669,7 @@
 
 
 def _estimatememory():
+    # type: () -> Optional[int]
     """Provide an estimate for the available system memory in Bytes.
 
     If no estimate can be provided on the platform, returns None.
@@ -3636,7 +3677,12 @@
     if pycompat.sysplatform.startswith(b'win'):
         # On Windows, use the GlobalMemoryStatusEx kernel function directly.
         from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
-        from ctypes.wintypes import Structure, byref, sizeof, windll
+        from ctypes.wintypes import (  # pytype: disable=import-error
+            Structure,
+            byref,
+            sizeof,
+            windll,
+        )
 
         class MEMORYSTATUSEX(Structure):
             _fields_ = [
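
The annotations added throughout util.py use the comment form of PEP 484
("# type: (...) -> ...") rather than inline annotations, which keeps the
module importable on Python 2 while still feeding pytype. The bool() cast in
hasdriveletter() has the same motive: without it, pytype infers a union type
for the chained and/or expression. The function from the hunk above, runnable
on its own:

    def hasdriveletter(path):
        # type: (bytes) -> bool
        # bool() narrows the inferred Union[bool, bytes] down to bool
        return bool(path) and path[1:2] == b':' and path[0:1].isalpha()

    assert hasdriveletter(b'c:/tmp')
    assert not hasdriveletter(b'/tmp')
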
--- a/mercurial/utils/compression.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/utils/compression.py	Thu Mar 18 18:24:59 2021 -0400
@@ -685,9 +685,11 @@
         # while providing no worse compression. It strikes a good balance
         # between speed and compression.
         level = opts.get(b'level', 3)
+        # default to single-threaded compression
+        threads = opts.get(b'threads', 0)
 
         zstd = self._module
-        z = zstd.ZstdCompressor(level=level).compressobj()
+        z = zstd.ZstdCompressor(level=level, threads=threads).compressobj()
         for chunk in it:
             data = z.compress(chunk)
             if data:
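
The threads option maps directly onto python-zstandard's multi-threaded
compression support: 0 keeps the historical single-threaded behaviour, and a
negative value asks the library to use all logical CPUs. A minimal streaming
sketch of the same call pattern (assuming the zstandard module is available):

    import zstandard

    def compressstream(chunks, level=3, threads=0):
        # threads=0: single-threaded (the default above); -1: all CPUs
        z = zstandard.ZstdCompressor(
            level=level, threads=threads
        ).compressobj()
        for chunk in chunks:
            data = z.compress(chunk)
            if data:
                yield data
        yield z.flush()

    compressed = b''.join(compressstream([b'some', b'payload'], threads=-1))
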
--- a/mercurial/utils/dateutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/utils/dateutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -18,6 +18,18 @@
     pycompat,
 )
 
+if pycompat.TYPE_CHECKING:
+    from typing import (
+        Callable,
+        Dict,
+        Iterable,
+        Optional,
+        Tuple,
+        Union,
+    )
+
+    hgdate = Tuple[float, int]  # (unixtime, offset)
+
 # used by parsedate
 defaultdateformats = (
     b'%Y-%m-%dT%H:%M:%S',  # the 'real' ISO8601
@@ -62,13 +74,16 @@
 
 
 def makedate(timestamp=None):
+    # type: (Optional[float]) -> hgdate
     """Return a unix timestamp (or the current time) as a (unixtime,
     offset) tuple based off the local timezone."""
     if timestamp is None:
         timestamp = time.time()
     if timestamp < 0:
         hint = _(b"check your clock")
-        raise error.Abort(_(b"negative timestamp: %d") % timestamp, hint=hint)
+        raise error.InputError(
+            _(b"negative timestamp: %d") % timestamp, hint=hint
+        )
     delta = datetime.datetime.utcfromtimestamp(
         timestamp
     ) - datetime.datetime.fromtimestamp(timestamp)
@@ -77,6 +92,7 @@
 
 
 def datestr(date=None, format=b'%a %b %d %H:%M:%S %Y %1%2'):
+    # type: (Optional[hgdate], bytes) -> bytes
     """represent a (unixtime, offset) tuple as a localized time.
     unixtime is seconds since the epoch, and offset is the time zone's
     number of seconds away from UTC.
@@ -114,11 +130,13 @@
 
 
 def shortdate(date=None):
+    # type: (Optional[hgdate]) -> bytes
     """turn (timestamp, tzoff) tuple into iso 8631 date."""
     return datestr(date, format=b'%Y-%m-%d')
 
 
 def parsetimezone(s):
+    # type: (bytes) -> Tuple[Optional[int], bytes]
     """find a trailing timezone, if any, in string, and return a
     (offset, remainder) pair"""
     s = pycompat.bytestr(s)
@@ -154,6 +172,7 @@
 
 
 def strdate(string, format, defaults=None):
+    # type: (bytes, bytes, Optional[Dict[bytes, Tuple[bytes, bytes]]]) -> hgdate
     """parse a localized time string and return a (unixtime, offset) tuple.
     if the string cannot be parsed, ValueError is raised."""
     if defaults is None:
@@ -196,6 +215,7 @@
 
 
 def parsedate(date, formats=None, bias=None):
+    # type: (Union[bytes, hgdate], Optional[Iterable[bytes]], Optional[Dict[bytes, bytes]]) -> hgdate
     """parse a localized date/time and return a (unixtime, offset) tuple.
 
     The date may be a "unixtime offset" string or in one of the specified
@@ -221,8 +241,11 @@
         bias = {}
     if not date:
         return 0, 0
-    if isinstance(date, tuple) and len(date) == 2:
-        return date
+    if isinstance(date, tuple):
+        if len(date) == 2:
+            return date
+        else:
+            raise error.ProgrammingError(b"invalid date format")
     if not formats:
         formats = defaultdateformats
     date = date.strip()
@@ -282,6 +305,7 @@
 
 
 def matchdate(date):
+    # type: (bytes) -> Callable[[float], bool]
     """Return a function that matches a given date match specifier
 
     Formats include:
@@ -311,10 +335,12 @@
     """
 
     def lower(date):
+        # type: (bytes) -> float
         d = {b'mb': b"1", b'd': b"1"}
         return parsedate(date, extendeddateformats, d)[0]
 
     def upper(date):
+        # type: (bytes) -> float
         d = {b'mb': b"12", b'HI': b"23", b'M': b"59", b'S': b"59"}
         for days in (b"31", b"30", b"29"):
             try:
@@ -328,24 +354,26 @@
     date = date.strip()
 
     if not date:
-        raise error.Abort(_(b"dates cannot consist entirely of whitespace"))
+        raise error.InputError(
+            _(b"dates cannot consist entirely of whitespace")
+        )
     elif date[0:1] == b"<":
         if not date[1:]:
-            raise error.Abort(_(b"invalid day spec, use '<DATE'"))
+            raise error.InputError(_(b"invalid day spec, use '<DATE'"))
         when = upper(date[1:])
         return lambda x: x <= when
     elif date[0:1] == b">":
         if not date[1:]:
-            raise error.Abort(_(b"invalid day spec, use '>DATE'"))
+            raise error.InputError(_(b"invalid day spec, use '>DATE'"))
         when = lower(date[1:])
         return lambda x: x >= when
     elif date[0:1] == b"-":
         try:
             days = int(date[1:])
         except ValueError:
-            raise error.Abort(_(b"invalid day spec: %s") % date[1:])
+            raise error.InputError(_(b"invalid day spec: %s") % date[1:])
         if days < 0:
-            raise error.Abort(
+            raise error.InputError(
                 _(b"%s must be nonnegative (see 'hg help dates')") % date[1:]
             )
         when = makedate()[0] - days * 3600 * 24
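
The hgdate alias introduced at the top of this file pins down Mercurial's
date convention: a (unixtime, offset) pair where unixtime is UTC-based and
offset is the timezone's distance west of UTC in seconds (negative east of
UTC). A small illustration of rendering such a pair back in its original
timezone, assuming that reading of the convention:

    import datetime

    def render_hgdate(hgdate):
        # type: (tuple) -> str
        unixtime, offset = hgdate
        # subtracting the seconds-west-of-UTC offset recovers the
        # wall-clock time in the zone the date was recorded in
        local = datetime.datetime.utcfromtimestamp(unixtime - offset)
        return local.isoformat()

    # midnight UTC, recorded in a UTC+01:00 zone (offset is -3600)
    assert render_hgdate((0.0, -3600)) == '1970-01-01T01:00:00'
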
--- a/mercurial/utils/storageutil.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/utils/storageutil.py	Thu Mar 18 18:24:59 2021 -0400
@@ -23,6 +23,7 @@
     pycompat,
 )
 from ..interfaces import repository
+from ..revlogutils import sidedata as sidedatamod
 from ..utils import hashutil
 
 _nullhash = hashutil.sha1(nullid)
@@ -294,6 +295,7 @@
     deltamode=repository.CG_DELTAMODE_STD,
     revisiondata=False,
     assumehaveparentrevisions=False,
+    sidedata_helpers=None,
 ):
     """Generic implementation of ifiledata.emitrevisions().
 
@@ -356,6 +358,21 @@
     ``nodesorder``
     ``revisiondata``
     ``assumehaveparentrevisions``
+    ``sidedata_helpers`` (optional)
+        If not None, means that sidedata should be included.
+        A tuple of `(repo, computers, removers)`, where `computers` and
+        `removers` are dictionaries mapping a revlog type to a list:
+            * `repo` is used as an argument for the computers
+            * `computers` is a list of `(category, (keys, computer))` that
+               compute the missing sidedata categories that were asked for:
+               * `category` is the sidedata category
+               * `keys` are the sidedata keys to be affected
+               * `computer` is the function `(repo, store, rev, sidedata)` that
+                 returns a new sidedata dict.
+            * `removers` is a list of `(keys, computer)` entries whose `keys`
+              select the sidedata categories that are present but not needed.
+        If both `computers` and `removers` are empty, sidedata are simply not
+        transformed.
+        Revlog types are `changelog`, `manifest` or `filelog`.
     """
 
     fnode = store.node
@@ -469,6 +486,17 @@
 
                 available.add(rev)
 
+        sidedata = None
+        if sidedata_helpers:
+            sidedata = store.sidedata(rev)
+            sidedata = run_sidedata_helpers(
+                store=store,
+                sidedata_helpers=sidedata_helpers,
+                sidedata=sidedata,
+                rev=rev,
+            )
+            sidedata = sidedatamod.serialize_sidedata(sidedata)
+
         yield resultcls(
             node=node,
             p1node=fnode(p1rev),
@@ -478,11 +506,31 @@
             baserevisionsize=baserevisionsize,
             revision=revision,
             delta=delta,
+            sidedata=sidedata,
         )
 
         prevrev = rev
 
 
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+    """Returns the sidedata for the given revision after running through
+    the given helpers.
+    - `store`: the revlog this applies to (changelog, manifest, or filelog
+      instance)
+    - `sidedata_helpers`: see `storageutil.emitrevisions`
+    - `sidedata`: previous sidedata at the given rev, if any
+    - `rev`: affected rev of `store`
+    """
+    repo, sd_computers, sd_removers = sidedata_helpers
+    kind = store.revlog_kind
+    for _keys, sd_computer in sd_computers.get(kind, []):
+        sidedata = sd_computer(repo, store, rev, sidedata)
+    for keys, _computer in sd_removers.get(kind, []):
+        for key in keys:
+            sidedata.pop(key, None)
+    return sidedata
+
+
 def deltaiscensored(delta, baserev, baselenfn):
     """Determine if a delta represents censored revision data.
 
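
To make the sidedata_helpers contract concrete, here is a self-contained
sketch of one computer and one remover flowing through the helper logic; the
store is a stub and the category/key values are hypothetical stand-ins:

    def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
        # same logic as the function added above, inlined so the sketch runs
        repo, sd_computers, sd_removers = sidedata_helpers
        kind = store.revlog_kind
        for _keys, sd_computer in sd_computers.get(kind, []):
            sidedata = sd_computer(repo, store, rev, sidedata)
        for keys, _computer in sd_removers.get(kind, []):
            for key in keys:
                sidedata.pop(key, None)
        return sidedata

    class FakeStore(object):
        revlog_kind = b'changelog'  # one of changelog/manifest/filelog

    def add_copies(repo, store, rev, sidedata):
        # a computer returns the new sidedata dict for the revision
        sidedata = dict(sidedata)
        sidedata[b'copies'] = b'computed for rev %d' % rev
        return sidedata

    helpers = (
        None,  # repo (unused by this toy computer)
        {b'changelog': [((b'copies',), add_copies)]},  # computers
        {b'changelog': [((b'old-key',), None)]},  # removers
    )
    out = run_sidedata_helpers(
        FakeStore(), helpers, {b'old-key': b'stale'}, rev=0
    )
    assert out == {b'copies': b'computed for rev 0'}
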
--- a/mercurial/wireprotov1peer.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/wireprotov1peer.py	Thu Mar 18 18:24:59 2021 -0400
@@ -43,14 +43,14 @@
     @batchable
     def sample(self, one, two=None):
         # Build list of encoded arguments suitable for your wire protocol:
-        encargs = [('one', encode(one),), ('two', encode(two),)]
+        encoded_args = [('one', encode(one),), ('two', encode(two),)]
         # Create future for injection of encoded result:
-        encresref = future()
+        encoded_res_future = future()
         # Return encoded arguments and future:
-        yield encargs, encresref
+        yield encoded_args, encoded_res_future
         # Assuming the future to be filled with the result from the batched
         # request now. Decode it:
-        yield decode(encresref.value)
+        yield decode(encoded_res_future.value)
 
     The decorator returns a function which wraps this coroutine as a plain
     method, but adds the original method as an attribute called "batchable",
@@ -60,12 +60,12 @@
 
     def plain(*args, **opts):
         batchable = f(*args, **opts)
-        encargsorres, encresref = next(batchable)
-        if not encresref:
-            return encargsorres  # a local result in this case
+        encoded_args_or_res, encoded_res_future = next(batchable)
+        if not encoded_res_future:
+            return encoded_args_or_res  # a local result in this case
         self = args[0]
         cmd = pycompat.bytesurl(f.__name__)  # ensure cmd is ascii bytestr
-        encresref.set(self._submitone(cmd, encargsorres))
+        encoded_res_future.set(self._submitone(cmd, encoded_args_or_res))
         return next(batchable)
 
     setattr(plain, 'batchable', f)
@@ -257,15 +257,15 @@
 
             # Encoded arguments and future holding remote result.
             try:
-                encargsorres, fremote = next(batchable)
+                encoded_args_or_res, fremote = next(batchable)
             except Exception:
                 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                 return
 
             if not fremote:
-                f.set_result(encargsorres)
+                f.set_result(encoded_args_or_res)
             else:
-                requests.append((command, encargsorres))
+                requests.append((command, encoded_args_or_res))
                 states.append((command, f, batchable, fremote))
 
         if not requests:
@@ -310,7 +310,7 @@
                 if not f.done():
                     f.set_exception(
                         error.ResponseError(
-                            _(b'unfulfilled batch command response')
+                            _(b'unfulfilled batch command response'), None
                         )
                     )
 
@@ -322,16 +322,27 @@
         for command, f, batchable, fremote in states:
             # Grab raw result off the wire and teach the internal future
             # about it.
-            remoteresult = next(wireresults)
-            fremote.set(remoteresult)
+            try:
+                remoteresult = next(wireresults)
+            except StopIteration:
+                # This can happen in particular because next(batchable)
+                # in the previous iteration can call peer._abort, which
+                # may close the peer.
+                f.set_exception(
+                    error.ResponseError(
+                        _(b'unfulfilled batch command response'), None
+                    )
+                )
+            else:
+                fremote.set(remoteresult)
 
-            # And ask the coroutine to decode that value.
-            try:
-                result = next(batchable)
-            except Exception:
-                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
-            else:
-                f.set_result(result)
+                # And ask the coroutine to decode that value.
+                try:
+                    result = next(batchable)
+                except Exception:
+                    pycompat.future_set_exception_info(f, sys.exc_info()[1:])
+                else:
+                    f.set_result(result)
 
 
 @interfaceutil.implementer(
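
The StopIteration guard added above matters because next() on an exhausted
iterator raises instead of returning a sentinel: if the peer was aborted
mid-batch, the remaining futures have to be failed explicitly rather than
letting the exception escape the response loop. A stripped-down sketch of
the same pattern with concurrent.futures (names are illustrative):

    from concurrent.futures import Future

    def drain(states, wireresults):
        # states: (command, Future) pairs still waiting on wire responses
        results = iter(wireresults)
        for command, fut in states:
            try:
                value = next(results)
            except StopIteration:
                # the wire ran dry (e.g. the peer was aborted mid-batch);
                # fail this future instead of letting StopIteration escape
                fut.set_exception(
                    RuntimeError('unfulfilled batch command response')
                )
            else:
                fut.set_result(value)

    f1, f2 = Future(), Future()
    drain([(b'heads', f1), (b'known', f2)], [b'reply-for-heads'])
    assert f1.result() == b'reply-for-heads'
    assert isinstance(f2.exception(), RuntimeError)
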
--- a/mercurial/wireprotov1server.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/mercurial/wireprotov1server.py	Thu Mar 18 18:24:59 2021 -0400
@@ -27,6 +27,7 @@
     exchange,
     pushkey as pushkeymod,
     pycompat,
+    requirements as requirementsmod,
     streamclone,
     util,
     wireprototypes,
@@ -108,7 +109,7 @@
     4. server.bundle1
     """
     ui = repo.ui
-    gd = b'generaldelta' in repo.requirements
+    gd = requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements
 
     if gd:
         v = ui.configbool(b'server', b'bundle1gd.%s' % action)
@@ -310,7 +311,7 @@
             caps.append(b'stream-preferred')
         requiredformats = repo.requirements & repo.supportedformats
         # if our local revlogs are just revlogv1, add 'stream' cap
-        if not requiredformats - {b'revlogv1'}:
+        if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}:
             caps.append(b'stream')
         # otherwise, add 'streamreqs' detailing our local revlog format
         else:
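
The capability logic above is a set-difference test: the plain 'stream'
capability may only be advertised when nothing beyond revlogv1 is required,
otherwise the exact requirements are spelled out in 'streamreqs'. A tiny
sketch of that test (the requirement strings here are illustrative):

    SUPPORTED = {b'revlogv1', b'generaldelta', b'sparserevlog'}

    def stream_caps(repo_requirements):
        requiredformats = repo_requirements & SUPPORTED
        if not requiredformats - {b'revlogv1'}:
            return [b'stream']  # nothing exotic; legacy clients can stream
        return [b'streamreqs=%s' % b','.join(sorted(requiredformats))]

    assert stream_caps({b'revlogv1', b'store'}) == [b'stream']
    assert stream_caps({b'revlogv1', b'generaldelta'}) == [
        b'streamreqs=generaldelta,revlogv1'
    ]
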
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyproject.toml	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.black]
+line-length = 80
+exclude = '''
+build/
+| wheelhouse/
+| dist/
+| packages/
+| \.hg/
+| \.mypy_cache/
+| \.venv/
+| mercurial/thirdparty/
+'''
+skip-string-normalization = true
+quiet = true
--- a/relnotes/5.7	Sat Mar 13 02:09:23 2021 -0500
+++ b/relnotes/5.7	Thu Mar 18 18:24:59 2021 -0400
@@ -17,6 +17,8 @@
    can be e.g. `rebase`. As part of this effort, the default format
    from `hg rebase` was reorganized a bit.
 
+ * `hg purge` is now a core command using `--confirm` by default.
+
  * `hg diff` and `hg extdiff` now support `--from <rev>` and `--to <rev>`
    arguments as clearer alternatives to `-r <revs>`. `-r <revs>` has been
    deprecated.
@@ -43,6 +45,9 @@
  * The `branchmap` cache is updated more intelligently and can be
    significantly faster for repositories with many branches and changesets.
 
+ * The `rev-branch-cache` is now updated incrementally whenever changesets
+   are added.
+
 
 == New Experimental Features ==
 
@@ -64,4 +69,5 @@
 
 == Internal API Changes ==
 
-
+ * `changelog.branchinfo` is deprecated and will be removed after 5.8.
+   It is superseded by `changelogrevision.branchinfo`.
--- a/relnotes/next	Sat Mar 13 02:09:23 2021 -0500
+++ b/relnotes/next	Thu Mar 18 18:24:59 2021 -0400
@@ -1,8 +1,24 @@
 == New Features ==
+
+ * `hg purge` is now a core command using `--confirm` by default.
+
+ * The `rev-branch-cache` is now updated incrementally whenever changesets
+   are added.
 
+ * The new options `experimental.bundlecompthreads` and
+   `experimental.bundlecompthreads.<engine>` can be used to instruct
+   the compression engines for bundle operations to use multiple threads
+   for compression. The default is single-threaded operation. Currently
+   only supported for zstd.
 
 == New Experimental Features ==
 
+ * There's a new `diff.merge` config option to show the changes
+   relative to an automerge for merge changesets. This makes it
+   easier to detect and review manual changes performed in merge
+   changesets. It is supported by `hg diff --change`, `hg log -p`,
+   `hg incoming -p`, and `hg outgoing -p` so far.
+
 
 == Bug Fixes ==
 
@@ -10,7 +26,24 @@
 
 == Backwards Compatibility Changes ==
 
+ * In normal repositories, the first parent of a changeset is not null,
+   unless both parents are null (like the first changeset). Some legacy
+   repositories violate this condition. The revlog code will now
+   silently swap the parents if this condition is detected. This can
+   change the output of `hg log` when explicitly asking for first or
+   second parent.
+
 
 == Internal API Changes ==
 
+ * `changelog.branchinfo` is deprecated and will be removed after 5.8.
+   It is superseded by `changelogrevision.branchinfo`.
 
+ * Callbacks for revlog.addgroup and the changelog._nodeduplicatecallback hook
+   now get a revision number as argument instead of a node.
+
+ * revlog.addrevision returns the revision number instead of the node.
+
+ * `nodes.nullid` and related constants are being phased out as part of
+   the deprecation of SHA1. Repository instances and related classes
+   provide access via `nodeconstants` and in some cases `nullid` attributes.
--- a/rust/Cargo.lock	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/Cargo.lock	Thu Mar 18 18:24:59 2021 -0400
@@ -4,1009 +4,1081 @@
 name = "adler"
 version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
 
 [[package]]
 name = "aho-corasick"
 version = "0.7.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
 dependencies = [
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr",
 ]
 
 [[package]]
 name = "ansi_term"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "atty"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
 dependencies = [
- "hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi",
+ "libc",
+ "winapi",
 ]
 
 [[package]]
 name = "autocfg"
 version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 
 [[package]]
 name = "bitflags"
 version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
 
 [[package]]
 name = "bitmaps"
 version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
 dependencies = [
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typenum",
 ]
 
 [[package]]
 name = "byteorder"
 version = "1.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+
+[[package]]
+name = "bytes-cast"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+dependencies = [
+ "bytes-cast-derive",
+]
+
+[[package]]
+name = "bytes-cast-derive"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
 
 [[package]]
 name = "cc"
 version = "1.0.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
 dependencies = [
- "jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jobserver",
 ]
 
 [[package]]
 name = "cfg-if"
 version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 
 [[package]]
 name = "cfg-if"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+dependencies = [
+ "libc",
+ "num-integer",
+ "num-traits",
+ "time",
+ "winapi",
+]
 
 [[package]]
 name = "clap"
 version = "2.33.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
 dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "atty",
+ "bitflags",
+ "strsim",
+ "textwrap",
+ "unicode-width",
+ "vec_map",
 ]
 
 [[package]]
 name = "const_fn"
 version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
 
 [[package]]
 name = "cpython"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "num-traits",
+ "python27-sys",
+ "python3-sys",
 ]
 
 [[package]]
 name = "crc32fast"
 version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
 dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
 ]
 
 [[package]]
 name = "crossbeam-channel"
 version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
 dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.7.2",
+ "maybe-uninit",
 ]
 
 [[package]]
 name = "crossbeam-channel"
 version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
 dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crossbeam-utils 0.8.1",
 ]
 
 [[package]]
 name = "crossbeam-deque"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
 dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-epoch 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crossbeam-epoch",
+ "crossbeam-utils 0.8.1",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
 version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
 dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "const_fn 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "const_fn",
+ "crossbeam-utils 0.8.1",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
 version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
 dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 0.1.10",
+ "lazy_static",
 ]
 
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
 dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 1.0.0",
+ "lazy_static",
 ]
 
 [[package]]
 name = "ctor"
 version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
 dependencies = [
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "difference"
 version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
 
 [[package]]
 name = "either"
 version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
 
 [[package]]
 name = "env_logger"
 version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
 dependencies = [
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "termcolor 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atty",
+ "humantime",
+ "log",
+ "regex",
+ "termcolor",
 ]
 
 [[package]]
 name = "flate2"
 version = "1.0.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
 dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crc32fast 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "miniz_oxide 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "crc32fast",
+ "libc",
+ "libz-sys",
+ "miniz_oxide",
 ]
 
 [[package]]
 name = "format-bytes"
-version = "0.1.3"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c4e89040c7fd7b4e6ba2820ac705a45def8a0c098ec78d170ae88f1ef1d5762"
 dependencies = [
- "format-bytes-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "format-bytes-macros",
+ "proc-macro-hack",
 ]
 
 [[package]]
 name = "format-bytes-macros"
-version = "0.1.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b05089e341a0460449e2210c3bf7b61597860b07f0deae58da38dbed0a4c6b6d"
 dependencies = [
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "fuchsia-cprng"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
 
 [[package]]
 name = "gcc"
 version = "0.3.55"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
 
 [[package]]
 name = "getrandom"
 version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "wasi 0.9.0+wasi-snapshot-preview1",
 ]
 
 [[package]]
 name = "glob"
 version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
 
 [[package]]
 name = "hermit-abi"
 version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
 ]
 
 [[package]]
-name = "hex"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
 name = "hg-core"
 version = "0.1.0"
 dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "flate2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "im-rc 15.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
- "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "twox-hash 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder",
+ "bytes-cast",
+ "clap",
+ "crossbeam-channel 0.4.4",
+ "derive_more",
+ "flate2",
+ "format-bytes",
+ "home",
+ "im-rc",
+ "lazy_static",
+ "log",
+ "memmap",
+ "micro-timer",
+ "pretty_assertions",
+ "rand 0.7.3",
+ "rand_distr",
+ "rand_pcg",
+ "rayon",
+ "regex",
+ "rust-crypto",
+ "same-file",
+ "tempfile",
+ "twox-hash",
+ "zstd",
 ]
 
 [[package]]
 name = "hg-cpython"
 version = "0.1.0"
 dependencies = [
- "cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "hg-core 0.1.0",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cpython",
+ "crossbeam-channel 0.4.4",
+ "env_logger",
+ "hg-core",
+ "libc",
+ "log",
+]
+
+[[package]]
+name = "home"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654"
+dependencies = [
+ "winapi",
 ]
 
 [[package]]
 name = "humantime"
 version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
 dependencies = [
- "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quick-error",
 ]
 
 [[package]]
 name = "im-rc"
 version = "15.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
 dependencies = [
- "bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_xoshiro 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sized-chunks 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitmaps",
+ "rand_core 0.5.1",
+ "rand_xoshiro",
+ "sized-chunks",
+ "typenum",
+ "version_check",
 ]
 
 [[package]]
 name = "itertools"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
 dependencies = [
- "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either",
 ]
 
 [[package]]
 name = "jobserver"
 version = "0.1.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
 ]
 
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
 version = "0.2.81"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
 
 [[package]]
 name = "libz-sys"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
 dependencies = [
- "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)",
- "pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "vcpkg 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "pkg-config",
+ "vcpkg",
 ]
 
 [[package]]
 name = "log"
 version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
 ]
 
 [[package]]
 name = "maybe-uninit"
 version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
 
 [[package]]
 name = "memchr"
 version = "2.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
 
 [[package]]
 name = "memmap"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "winapi",
 ]
 
 [[package]]
 name = "memoffset"
 version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
 dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
 ]
 
 [[package]]
 name = "micro-timer"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
 dependencies = [
- "micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer-macros",
+ "scopeguard",
 ]
 
 [[package]]
 name = "micro-timer-macros"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
 dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "scopeguard",
+ "syn",
 ]
 
 [[package]]
 name = "miniz_oxide"
 version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
 dependencies = [
- "adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "adler",
+ "autocfg",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
+dependencies = [
+ "autocfg",
+ "num-traits",
 ]
 
 [[package]]
 name = "num-traits"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
 dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
 ]
 
 [[package]]
 name = "num_cpus"
 version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
 dependencies = [
- "hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi",
+ "libc",
 ]
 
 [[package]]
 name = "output_vt100"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "pkg-config"
 version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
 
 [[package]]
 name = "ppv-lite86"
 version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
 
 [[package]]
 name = "pretty_assertions"
 version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
 dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "ctor",
+ "difference",
+ "output_vt100",
 ]
 
 [[package]]
 name = "proc-macro-hack"
 version = "0.5.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
 
 [[package]]
 name = "proc-macro2"
 version = "1.0.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
 dependencies = [
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid",
 ]
 
 [[package]]
 name = "python27-sys"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "regex",
 ]
 
 [[package]]
 name = "python3-sys"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "regex",
 ]
 
 [[package]]
 name = "quick-error"
 version = "1.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
 
 [[package]]
 name = "quote"
 version = "1.0.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
 dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
 ]
 
 [[package]]
 name = "rand"
 version = "0.3.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "rand 0.4.6",
 ]
 
 [[package]]
 name = "rand"
 version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
 dependencies = [
- "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.3.1",
+ "rdrand",
+ "winapi",
 ]
 
 [[package]]
 name = "rand"
 version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
+ "libc",
+ "rand_chacha",
+ "rand_core 0.5.1",
+ "rand_hc",
 ]
 
 [[package]]
 name = "rand_chacha"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
 dependencies = [
- "ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86",
+ "rand_core 0.5.1",
 ]
 
 [[package]]
 name = "rand_core"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
 dependencies = [
- "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2",
 ]
 
 [[package]]
 name = "rand_core"
 version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
 
 [[package]]
 name = "rand_core"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom",
 ]
 
 [[package]]
 name = "rand_distr"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
 dependencies = [
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.3",
 ]
 
 [[package]]
 name = "rand_hc"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
 dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
 ]
 
 [[package]]
 name = "rand_pcg"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
 dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
 ]
 
 [[package]]
 name = "rand_xoshiro"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
 dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1",
 ]
 
 [[package]]
 name = "rayon"
 version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
 dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon-core 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
 version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
 dependencies = [
- "crossbeam-channel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-channel 0.5.0",
+ "crossbeam-deque",
+ "crossbeam-utils 0.8.1",
+ "lazy_static",
+ "num_cpus",
 ]
 
 [[package]]
 name = "rdrand"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
 dependencies = [
- "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1",
 ]
 
 [[package]]
 name = "redox_syscall"
 version = "0.1.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
 
 [[package]]
 name = "regex"
 version = "1.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c"
 dependencies = [
- "aho-corasick 0.7.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+ "thread_local",
 ]
 
 [[package]]
 name = "regex-syntax"
 version = "0.6.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
 
 [[package]]
 name = "remove_dir_all"
 version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "rhg"
 version = "0.1.0"
 dependencies = [
- "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "hg-core 0.1.0",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "chrono",
+ "clap",
+ "derive_more",
+ "env_logger",
+ "format-bytes",
+ "hg-core",
+ "lazy_static",
+ "log",
+ "micro-timer",
+ "regex",
+ "users",
 ]
 
 [[package]]
 name = "rust-crypto"
 version = "0.2.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
 dependencies = [
- "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc",
+ "libc",
+ "rand 0.3.23",
+ "rustc-serialize",
+ "time",
 ]
 
 [[package]]
 name = "rustc-serialize"
 version = "0.3.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
 
 [[package]]
 name = "same-file"
 version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
 dependencies = [
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
 ]
 
 [[package]]
 name = "scopeguard"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "sized-chunks"
 version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
 dependencies = [
- "bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitmaps",
+ "typenum",
 ]
 
 [[package]]
 name = "static_assertions"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
 
 [[package]]
 name = "strsim"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
 
 [[package]]
 name = "syn"
 version = "1.0.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
 dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
 ]
 
 [[package]]
 name = "tempfile"
 version = "3.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
- "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "rand 0.7.3",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
 ]
 
 [[package]]
 name = "termcolor"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
 dependencies = [
- "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util",
 ]
 
 [[package]]
 name = "textwrap"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 dependencies = [
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width",
 ]
 
 [[package]]
 name = "thread_local"
 version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
 dependencies = [
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static",
 ]
 
 [[package]]
 name = "time"
 version = "0.1.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+ "winapi",
 ]
 
 [[package]]
 name = "twox-hash"
 version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
 dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "rand 0.7.3",
+ "static_assertions",
 ]
 
 [[package]]
 name = "typenum"
 version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
 
 [[package]]
 name = "unicode-width"
 version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
 
 [[package]]
 name = "unicode-xid"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "users"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032"
+dependencies = [
+ "libc",
+ "log",
+]
 
 [[package]]
 name = "vcpkg"
 version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
 
 [[package]]
 name = "vec_map"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
 
 [[package]]
 name = "version_check"
 version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
 
 [[package]]
 name = "wasi"
 version = "0.9.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
 
 [[package]]
 name = "wasi"
 version = "0.10.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
 
 [[package]]
 name = "winapi"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
 dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
 ]
 
 [[package]]
 name = "winapi-i686-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
 name = "winapi-util"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
 dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
 ]
 
 [[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
 name = "zstd"
 version = "0.5.3+zstd.1.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
 dependencies = [
- "zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "zstd-safe",
 ]
 
 [[package]]
 name = "zstd-safe"
 version = "2.0.5+zstd.1.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
 dependencies = [
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
- "zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "zstd-sys",
 ]
 
 [[package]]
 name = "zstd-sys"
 version = "1.4.17+zstd.1.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
 dependencies = [
- "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)",
- "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "glob",
+ "itertools",
+ "libc",
 ]
-
-[metadata]
-"checksum adler 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
-"checksum aho-corasick 0.7.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
-"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-"checksum autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum bitmaps 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)" = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
-"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-"checksum clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)" = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-"checksum const_fn 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
-"checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
-"checksum crc32fast 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
-"checksum crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
-"checksum crossbeam-channel 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
-"checksum crossbeam-deque 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
-"checksum crossbeam-epoch 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum crossbeam-utils 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
-"checksum ctor 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
-"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
-"checksum either 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
-"checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
-"checksum flate2 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
-"checksum format-bytes 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1a7374eb574cd29ae45878554298091c554c3286a17b3afa440a3e2710ae0790"
-"checksum format-bytes-macros 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4edcc04201cea17a0e6b937adebd46b93fba09924c7e6ed8c515a35ce8432cbc"
-"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-"checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
-"checksum getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
-"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
-"checksum hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
-"checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35"
-"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
-"checksum im-rc 15.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
-"checksum itertools 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
-"checksum jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
-"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.81 (registry+https://github.com/rust-lang/crates.io-index)" = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
-"checksum libz-sys 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
-"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memchr 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
-"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
-"checksum memoffset 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
-"checksum micro-timer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
-"checksum micro-timer-macros 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
-"checksum miniz_oxide 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
-"checksum num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
-"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
-"checksum output_vt100 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
-"checksum pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-"checksum ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-"checksum pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427"
-"checksum proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)" = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-"checksum proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
-"checksum python27-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67cb041de8615111bf224dd75667af5f25c6e032118251426fed7f1b70ce4c8c"
-"checksum python3-sys 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90af11779515a1e530af60782d273b59ac79d33b0e253c071a728563957c76d4"
-"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
-"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
-"checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
-"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
-"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
-"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
-"checksum rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
-"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
-"checksum rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
-"checksum rand_xoshiro 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
-"checksum rayon 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
-"checksum rayon-core 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
-"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-"checksum redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)" = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
-"checksum regex 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c"
-"checksum regex-syntax 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
-"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-"checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
-"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum sized-chunks 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
-"checksum static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
-"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-"checksum syn 1.0.54 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
-"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
-"checksum termcolor 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
-"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
-"checksum time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)" = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
-"checksum twox-hash 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
-"checksum typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
-"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
-"checksum vcpkg 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
-"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-"checksum version_check 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
-"checksum wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
-"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
-"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-"checksum zstd 0.5.3+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
-"checksum zstd-safe 2.0.5+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
-"checksum zstd-sys 1.4.17+zstd.1.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
--- a/rust/hg-core/Cargo.toml	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/Cargo.toml	Thu Mar 18 18:24:59 2021 -0400
@@ -9,11 +9,12 @@
 name = "hg"
 
 [dependencies]
+bytes-cast = "0.1"
 byteorder = "1.3.4"
-hex = "0.4.2"
+derive_more = "0.99"
+home = "0.5"
 im-rc = "15.0.*"
 lazy_static = "1.4.0"
-memchr = "2.3.3"
 rand = "0.7.3"
 rand_pcg = "0.2.1"
 rand_distr = "0.2.2"
@@ -27,7 +28,7 @@
 memmap = "0.7.0"
 zstd = "0.5.3"
 rust-crypto = "0.2.36"
-format-bytes = "0.1.2"
+format-bytes = "0.2.2"
 
 # We don't use the `miniz-oxide` backend, to avoid changing rhg benchmarks,
 # until we have a clearer view of which backend is the fastest.
--- a/rust/hg-core/examples/nodemap/main.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/examples/nodemap/main.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -49,7 +49,7 @@
 
 fn query(index: &Index, nm: &NodeTree, prefix: &str) {
     let start = Instant::now();
-    let res = nm.find_hex(index, prefix);
+    let res = NodePrefix::from_hex(prefix).map(|p| nm.find_bin(index, p));
     println!("Result found in {:?}: {:?}", start.elapsed(), res);
 }
 
@@ -66,7 +66,7 @@
         .collect();
     if queries < 10 {
         let nodes_hex: Vec<String> =
-            nodes.iter().map(|n| n.encode_hex()).collect();
+            nodes.iter().map(|n| format!("{:x}", n)).collect();
         println!("Nodes: {:?}", nodes_hex);
     }
     let mut last: Option<Revision> = None;
@@ -76,11 +76,11 @@
     }
     let elapsed = start.elapsed();
     println!(
-        "Did {} queries in {:?} (mean {:?}), last was {:?} with result {:?}",
+        "Did {} queries in {:?} (mean {:?}), last was {:x} with result {:?}",
         queries,
         elapsed,
         elapsed / (queries as u32),
-        nodes.last().unwrap().encode_hex(),
+        nodes.last().unwrap(),
         last
     );
 }
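
The two hunks above replace hex-oriented node lookups with binary ones: callers validate a prefix with `NodePrefix::from_hex` and pass the result to `NodeTree::find_bin`, and nodes print through the `{:x}` formatter instead of the removed `encode_hex`. A minimal caller sketch, assuming the hg-core `Index`, `NodeTree`, and `NodePrefix` types are in scope as in the example file above:

    fn lookup(index: &Index, nm: &NodeTree, prefix: &str) {
        // `from_hex` rejects bad input before any tree traversal happens.
        let res = NodePrefix::from_hex(prefix).map(|p| nm.find_bin(index, p));
        println!("lookup of {}: {:?}", prefix, res);
    }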
--- a/rust/hg-core/src/config.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/config.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -11,4 +11,6 @@
 
 mod config;
 mod layer;
-pub use config::Config;
+mod values;
+pub use config::{Config, ConfigValueParseError};
+pub use layer::{ConfigError, ConfigParseError};
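
With these re-exports, code outside the module imports the error types from the module root rather than from the private `config`/`layer` submodules; a sketch, assuming the crate is consumed under its library name `hg` (per `rust/hg-core/Cargo.toml` above):

    use hg::config::{Config, ConfigError, ConfigParseError, ConfigValueParseError};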
--- a/rust/hg-core/src/config/config.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/config/config.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -8,25 +8,43 @@
 // GNU General Public License version 2 or any later version.
 
 use super::layer;
-use crate::config::layer::{ConfigError, ConfigLayer, ConfigValue};
-use std::path::PathBuf;
+use super::values;
+use crate::config::layer::{
+    ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
+};
+use crate::utils::files::get_bytes_from_os_str;
+use crate::utils::SliceExt;
+use format_bytes::{write_bytes, DisplayBytes};
+use std::collections::HashSet;
+use std::env;
+use std::path::{Path, PathBuf};
+use std::str;
 
-use crate::operations::find_root;
-use crate::utils::files::read_whole_file;
+use crate::errors::{HgResultExt, IoResultExt};
 
 /// Holds the config values for the current repository
 /// TODO update this docstring once we support more sources
+#[derive(Clone)]
 pub struct Config {
     layers: Vec<layer::ConfigLayer>,
 }
 
-impl std::fmt::Debug for Config {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl DisplayBytes for Config {
+    fn display_bytes(
+        &self,
+        out: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
         for (index, layer) in self.layers.iter().rev().enumerate() {
-            write!(
-                f,
-                "==== Layer {} (trusted: {}) ====\n{:?}",
-                index, layer.trusted, layer
+            write_bytes!(
+                out,
+                b"==== Layer {} (trusted: {}) ====\n{}",
+                index,
+                if layer.trusted {
+                    &b"yes"[..]
+                } else {
+                    &b"no"[..]
+                },
+                layer
             )?;
         }
         Ok(())
@@ -40,15 +58,161 @@
     Parsed(layer::ConfigLayer),
 }
 
-pub fn parse_bool(v: &[u8]) -> Option<bool> {
-    match v.to_ascii_lowercase().as_slice() {
-        b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
-        b"0" | b"no" | b"false" | b"off" | b"never" => Some(false),
-        _ => None,
-    }
+#[derive(Debug)]
+pub struct ConfigValueParseError {
+    pub origin: ConfigOrigin,
+    pub line: Option<usize>,
+    pub section: Vec<u8>,
+    pub item: Vec<u8>,
+    pub value: Vec<u8>,
+    pub expected_type: &'static str,
 }
 
 impl Config {
+    /// Load system and user configuration from various files.
+    ///
+    /// This is also affected by some environment variables.
+    pub fn load(
+        cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+    ) -> Result<Self, ConfigError> {
+        let mut config = Self { layers: Vec::new() };
+        let opt_rc_path = env::var_os("HGRCPATH");
+        // HGRCPATH replaces system config
+        if opt_rc_path.is_none() {
+            config.add_system_config()?
+        }
+
+        config.add_for_environment_variable("EDITOR", b"ui", b"editor");
+        config.add_for_environment_variable("VISUAL", b"ui", b"editor");
+        config.add_for_environment_variable("PAGER", b"pager", b"pager");
+
+        // These are set by `run-tests.py --rhg` to enable fallback for the
+        // entire test suite. Alternatives would be setting configuration
+        // through `$HGRCPATH` but some tests override that, or changing the
+        // `hg` shell alias to include `--config` but that disrupts tests that
+        // print command lines and check expected output.
+        config.add_for_environment_variable(
+            "RHG_ON_UNSUPPORTED",
+            b"rhg",
+            b"on-unsupported",
+        );
+        config.add_for_environment_variable(
+            "RHG_FALLBACK_EXECUTABLE",
+            b"rhg",
+            b"fallback-executable",
+        );
+
+        // HGRCPATH replaces user config
+        if opt_rc_path.is_none() {
+            config.add_user_config()?
+        }
+        if let Some(rc_path) = &opt_rc_path {
+            for path in env::split_paths(rc_path) {
+                if !path.as_os_str().is_empty() {
+                    if path.is_dir() {
+                        config.add_trusted_dir(&path)?
+                    } else {
+                        config.add_trusted_file(&path)?
+                    }
+                }
+            }
+        }
+        if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
+            config.layers.push(layer)
+        }
+        Ok(config)
+    }
+
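    // A usage sketch (illustrative, not part of the patch): `load` layers
    // system config, environment-variable overrides, user config, then
    // `--config` CLI pairs; setting $HGRCPATH replaces both the system and
    // user files, as the two `is_none` branches above show.
    fn example_load() -> Result<(), ConfigError> {
        let config = Config::load(vec![&b"ui.editor=vim"[..]])?;
        // The CLI layer is pushed last, so it wins during lookup.
        assert_eq!(config.get(b"ui", b"editor"), Some(&b"vim"[..]));
        Ok(())
    }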
+    fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
+        if let Some(entries) = std::fs::read_dir(path)
+            .when_reading_file(path)
+            .io_not_found_as_none()?
+        {
+            let mut file_paths = entries
+                .map(|result| {
+                    result.when_reading_file(path).map(|entry| entry.path())
+                })
+                .collect::<Result<Vec<_>, _>>()?;
+            file_paths.sort();
+            for file_path in &file_paths {
+                if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
+                    self.add_trusted_file(&file_path)?
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
+        if let Some(data) = std::fs::read(path)
+            .when_reading_file(path)
+            .io_not_found_as_none()?
+        {
+            self.layers.extend(ConfigLayer::parse(path, &data)?)
+        }
+        Ok(())
+    }
+
+    fn add_for_environment_variable(
+        &mut self,
+        var: &str,
+        section: &[u8],
+        key: &[u8],
+    ) {
+        if let Some(value) = env::var_os(var) {
+            let origin = layer::ConfigOrigin::Environment(var.into());
+            let mut layer = ConfigLayer::new(origin);
+            layer.add(
+                section.to_owned(),
+                key.to_owned(),
+                get_bytes_from_os_str(value),
+                None,
+            );
+            self.layers.push(layer)
+        }
+    }
+
+    #[cfg(unix)] // TODO: other platforms
+    fn add_system_config(&mut self) -> Result<(), ConfigError> {
+        let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
+            let etc = prefix.join("etc").join("mercurial");
+            self.add_trusted_file(&etc.join("hgrc"))?;
+            self.add_trusted_dir(&etc.join("hgrc.d"))
+        };
+        let root = Path::new("/");
+        // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
+        // instead? TODO: can this be a relative path?
+        let hg = crate::utils::current_exe()?;
+        // TODO: this order (per-installation then per-system) matches
+        // `systemrcpath()` in `mercurial/scmposix.py`, but
+        // `mercurial/helptext/config.txt` suggests it should be reversed
+        if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
+            if installation_prefix != root {
+                add_for_prefix(&installation_prefix)?
+            }
+        }
+        add_for_prefix(root)?;
+        Ok(())
+    }
+
+    #[cfg(unix)] // TODO: other platforms
+    fn add_user_config(&mut self) -> Result<(), ConfigError> {
+        let opt_home = home::home_dir();
+        if let Some(home) = &opt_home {
+            self.add_trusted_file(&home.join(".hgrc"))?
+        }
+        let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
+        if !darwin {
+            if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
+                .map(PathBuf::from)
+                .or_else(|| opt_home.map(|home| home.join(".config")))
+            {
+                self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
+            }
+        }
+        Ok(())
+    }
+
     /// Loads in order, which means that the precedence is the same
     /// as the order of `sources`.
     pub fn load_from_explicit_sources(
@@ -62,7 +226,7 @@
                 ConfigSource::AbsPath(c) => {
                     // TODO check if it should be trusted
                     // mercurial/ui.py:427
-                    let data = match read_whole_file(&c) {
+                    let data = match std::fs::read(&c) {
                         Err(_) => continue, // same as the python code
                         Ok(data) => data,
                     };
@@ -74,13 +238,86 @@
         Ok(Config { layers })
     }
 
-    /// Loads the local config. In a future version, this will also load the
-    /// `$HOME/.hgrc` and more to mirror the Python implementation.
-    pub fn load() -> Result<Self, ConfigError> {
-        let root = find_root().unwrap();
-        Ok(Self::load_from_explicit_sources(vec![
-            ConfigSource::AbsPath(root.join(".hg/hgrc")),
-        ])?)
+    /// Loads the per-repository config into a new `Config` which is combined
+    /// with `self`.
+    pub(crate) fn combine_with_repo(
+        &self,
+        repo_config_files: &[PathBuf],
+    ) -> Result<Self, ConfigError> {
+        let (cli_layers, other_layers) = self
+            .layers
+            .iter()
+            .cloned()
+            .partition(ConfigLayer::is_from_command_line);
+
+        let mut repo_config = Self {
+            layers: other_layers,
+        };
+        for path in repo_config_files {
+            // TODO: check if this file should be trusted:
+            // `mercurial/ui.py:427`
+            repo_config.add_trusted_file(path)?;
+        }
+        repo_config.layers.extend(cli_layers);
+        Ok(repo_config)
+    }
+
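    // Sketch (crate-internal, since `combine_with_repo` is `pub(crate)`):
    // the repository's own config files land between the global layers and
    // any `--config` layers, so command-line values keep top precedence.
    fn example_repo_config(
        global: &Config,
        hgrc: PathBuf,
    ) -> Result<Config, ConfigError> {
        global.combine_with_repo(&[hgrc])
    }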
+    fn get_parse<'config, T: 'config>(
+        &'config self,
+        section: &[u8],
+        item: &[u8],
+        expected_type: &'static str,
+        parse: impl Fn(&'config [u8]) -> Option<T>,
+    ) -> Result<Option<T>, ConfigValueParseError> {
+        match self.get_inner(&section, &item) {
+            Some((layer, v)) => match parse(&v.bytes) {
+                Some(b) => Ok(Some(b)),
+                None => Err(ConfigValueParseError {
+                    origin: layer.origin.to_owned(),
+                    line: v.line,
+                    value: v.bytes.to_owned(),
+                    section: section.to_owned(),
+                    item: item.to_owned(),
+                    expected_type,
+                }),
+            },
+            None => Ok(None),
+        }
+    }
+
+    /// Returns an `Err` if the first value found is not a valid UTF-8 string.
+    /// Otherwise, returns `Ok(Some(value))` if found, or `Ok(None)`.
+    pub fn get_str(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<&str>, ConfigValueParseError> {
+        self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
+            str::from_utf8(value).ok()
+        })
+    }
+
+    /// Returns an `Err` if the first value found is not a valid unsigned
+    /// integer. Otherwise, returns `Ok(Some(value))` if found, or `Ok(None)`.
+    pub fn get_u32(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<u32>, ConfigValueParseError> {
+        self.get_parse(section, item, "valid integer", |value| {
+            str::from_utf8(value).ok()?.parse().ok()
+        })
+    }
+
+    /// Returns an `Err` if the first value found is not a valid file size
+    /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
+    /// Otherwise, returns `Ok(Some(value_in_bytes))` if found, or `Ok(None)`.
+    pub fn get_byte_size(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<u64>, ConfigValueParseError> {
+        self.get_parse(section, item, "byte quantity", values::parse_byte_size)
     }
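    // Sketch of the typed getters above; the `[ui]` keys `threads` and
    // `cache-size` are hypothetical, chosen only for illustration.
    fn example_typed(config: &Config) -> Result<(), ConfigValueParseError> {
        let threads = config.get_u32(b"ui", b"threads")?;
        // `get_byte_size` accepts `30`, `7 MB`, `1.5 KB`, etc.;
        // `1.5 KB` parses to 1536 bytes (see the test further down).
        let cache = config.get_byte_size(b"ui", b"cache-size")?;
        println!("threads: {:?}, cache: {:?}", threads, cache);
        Ok(())
    }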
 
     /// Returns an `Err` if the first value found is not a valid boolean.
@@ -90,18 +327,8 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<Option<bool>, ConfigError> {
-        match self.get_inner(&section, &item) {
-            Some((layer, v)) => match parse_bool(&v.bytes) {
-                Some(b) => Ok(Some(b)),
-                None => Err(ConfigError::Parse {
-                    origin: layer.origin.to_owned(),
-                    line: v.line,
-                    bytes: v.bytes.to_owned(),
-                }),
-            },
-            None => Ok(None),
-        }
+    ) -> Result<Option<bool>, ConfigValueParseError> {
+        self.get_parse(section, item, "boolean", values::parse_bool)
     }
 
     /// Returns the corresponding boolean in the config. Returns `Ok(false)`
@@ -110,10 +337,35 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<bool, ConfigError> {
+    ) -> Result<bool, ConfigValueParseError> {
         Ok(self.get_option(section, item)?.unwrap_or(false))
     }
 
+    /// Returns the corresponding list-value in the config if found, or `None`.
+    ///
+    /// This is appropriate for new configuration keys. The value syntax is
+    /// **not** the same as most existing list-valued config, which has Python
+    /// parsing implemented in `parselist()` in `mercurial/config.py`.
+    /// Faithfully porting that parsing algorithm to Rust (including behaviors
+    /// that are arguably bugs) turned out to be non-trivial and hasn’t been
+    /// completed as of this writing.
+    ///
+    /// Instead, the "simple" syntax is: split on comma, then trim leading and
+    /// trailing whitespace of each component. Quotes or backslashes are not
+    /// interpreted in any way. Commas are mandatory between values. Values
+    /// that contain a comma are not supported.
+    pub fn get_simple_list(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Option<impl Iterator<Item = &[u8]>> {
+        self.get(section, item).map(|value| {
+            value
+                .split(|&byte| byte == b',')
+                .map(|component| component.trim())
+        })
+    }
+
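    // Sketch: given `[hooks] ignored = foo, bar` (a hypothetical key), the
    // simple syntax yields `foo` and `bar` after splitting and trimming.
    fn example_list(config: &Config) {
        if let Some(values) = config.get_simple_list(b"hooks", b"ignored") {
            for value in values {
                println!("{}", String::from_utf8_lossy(value));
            }
        }
    }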
     /// Returns the raw value bytes of the first one found, or `None`.
     pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
         self.get_inner(section, item)
@@ -137,6 +389,14 @@
         None
     }
 
+    /// Returns all keys defined for the given section.
+    pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
+        self.layers
+            .iter()
+            .flat_map(|layer| layer.iter_keys(section))
+            .collect()
+    }
+
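    // Sketch: enumerate every key configured under `[extensions]`, merged
    // across all layers; the result is a `HashSet`, so order is unspecified.
    fn example_keys(config: &Config) {
        for key in config.get_section_keys(b"extensions") {
            println!("{}", String::from_utf8_lossy(key));
        }
    }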
     /// Get raw values bytes from all layers (even untrusted ones) in order
     /// of precedence.
     #[cfg(test)]
@@ -169,15 +429,14 @@
         let base_config_path = tmpdir_path.join("base.rc");
         let mut config_file = File::create(&base_config_path).unwrap();
         let data =
-            b"[section]\nitem=value0\n%include included.rc\nitem=value2";
+            b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
+              [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
         config_file.write_all(data).unwrap();
 
         let sources = vec![ConfigSource::AbsPath(base_config_path)];
         let config = Config::load_from_explicit_sources(sources)
             .expect("expected valid config");
 
-        dbg!(&config);
-
         let (_, value) = config.get_inner(b"section", b"item").unwrap();
         assert_eq!(
             value,
@@ -193,5 +452,13 @@
             config.get_all(b"section", b"item"),
             [b"value2", b"value1", b"value0"]
         );
+
+        assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
+        assert_eq!(
+            config.get_byte_size(b"section2", b"size").unwrap(),
+            Some(1024 + 512)
+        );
+        assert!(config.get_u32(b"section2", b"not-count").is_err());
+        assert!(config.get_byte_size(b"section2", b"not-size").is_err());
     }
 }
--- a/rust/hg-core/src/config/layer.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/config/layer.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -7,14 +7,12 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::utils::files::{
-    get_bytes_from_path, get_path_from_bytes, read_whole_file,
-};
-use format_bytes::format_bytes;
+use crate::errors::HgError;
+use crate::utils::files::{get_bytes_from_path, get_path_from_bytes};
+use format_bytes::{format_bytes, write_bytes, DisplayBytes};
 use lazy_static::lazy_static;
 use regex::bytes::Regex;
 use std::collections::HashMap;
-use std::io;
 use std::path::{Path, PathBuf};
 
 lazy_static! {
@@ -53,6 +51,51 @@
         }
     }
 
+    /// Parse `--config` CLI arguments and return a layer if there’s any
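+    ///
+    /// A sketch of the expected shape, with a hypothetical value:
+    ///
+    /// ```ignore
+    /// // `--config ui.editor=vim` yields section `ui`, item `editor` and
+    /// // value `vim`, in a layer whose origin is ConfigOrigin::CommandLine.
+    /// ```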
+    pub(crate) fn parse_cli_args(
+        cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+    ) -> Result<Option<Self>, ConfigError> {
+        fn parse_one(arg: &[u8]) -> Option<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+            use crate::utils::SliceExt;
+
+            let (section_and_item, value) = arg.split_2(b'=')?;
+            let (section, item) = section_and_item.trim().split_2(b'.')?;
+            Some((
+                section.to_owned(),
+                item.to_owned(),
+                value.trim().to_owned(),
+            ))
+        }
+
+        let mut layer = Self::new(ConfigOrigin::CommandLine);
+        for arg in cli_config_args {
+            let arg = arg.as_ref();
+            if let Some((section, item, value)) = parse_one(arg) {
+                layer.add(section, item, value, None);
+            } else {
+                Err(HgError::abort(format!(
+                    "abort: malformed --config option: '{}' \
+                    (use --config section.name=value)",
+                    String::from_utf8_lossy(arg),
+                )))?
+            }
+        }
+        if layer.sections.is_empty() {
+            Ok(None)
+        } else {
+            Ok(Some(layer))
+        }
+    }
+
+    /// Returns whether this layer comes from `--config` CLI arguments
+    pub(crate) fn is_from_command_line(&self) -> bool {
+        if let ConfigOrigin::CommandLine = self.origin {
+            true
+        } else {
+            false
+        }
+    }
+
     /// Add an entry to the config, overwriting the old one if already present.
     pub fn add(
         &mut self,
@@ -72,6 +115,14 @@
         Some(self.sections.get(section)?.get(item)?)
     }
 
+    /// Returns the keys defined in the given section
+    pub fn iter_keys(&self, section: &[u8]) -> impl Iterator<Item = &[u8]> {
+        self.sections
+            .get(section)
+            .into_iter()
+            .flat_map(|section| section.keys().map(|vec| &**vec))
+    }
+
     pub fn is_empty(&self) -> bool {
         self.sections.is_empty()
     }
@@ -96,21 +147,39 @@
         let mut section = b"".to_vec();
 
         while let Some((index, bytes)) = lines_iter.next() {
+            let line = Some(index + 1);
             if let Some(m) = INCLUDE_RE.captures(&bytes) {
                 let filename_bytes = &m[1];
-                let filename_to_include = get_path_from_bytes(&filename_bytes);
-                match read_include(&src, &filename_to_include) {
-                    (include_src, Ok(data)) => {
+                let filename_bytes = crate::utils::expand_vars(filename_bytes);
+                // `Path::parent` only fails for the root directory,
+                // which `src` can’t be since we’ve managed to open it as a
+                // file.
+                let dir = src
+                    .parent()
+                    .expect("Path::parent fail on a file we’ve read");
+                // `Path::join` with an absolute argument correctly ignores the
+                // base path
+                let filename = dir.join(&get_path_from_bytes(&filename_bytes));
+                match std::fs::read(&filename) {
+                    Ok(data) => {
                         layers.push(current_layer);
-                        layers.extend(Self::parse(&include_src, &data)?);
+                        layers.extend(Self::parse(&filename, &data)?);
                         current_layer =
                             Self::new(ConfigOrigin::File(src.to_owned()));
                     }
-                    (_, Err(e)) => {
-                        return Err(ConfigError::IncludeError {
-                            path: filename_to_include.to_owned(),
-                            io_error: e,
-                        })
+                    Err(error) => {
+                        if error.kind() != std::io::ErrorKind::NotFound {
+                            return Err(ConfigParseError {
+                                origin: ConfigOrigin::File(src.to_owned()),
+                                line,
+                                message: format_bytes!(
+                                    b"cannot include {} ({})",
+                                    filename_bytes,
+                                    format_bytes::Utf8(error)
+                                ),
+                            }
+                            .into());
+                        }
                     }
                 }
             } else if let Some(_) = EMPTY_RE.captures(&bytes) {
@@ -134,22 +203,23 @@
                     };
                     lines_iter.next();
                 }
-                current_layer.add(
-                    section.clone(),
-                    item,
-                    value,
-                    Some(index + 1),
-                );
+                current_layer.add(section.clone(), item, value, line);
             } else if let Some(m) = UNSET_RE.captures(&bytes) {
                 if let Some(map) = current_layer.sections.get_mut(&section) {
                     map.remove(&m[1]);
                 }
             } else {
-                return Err(ConfigError::Parse {
+                let message = if bytes.starts_with(b" ") {
+                    format_bytes!(b"unexpected leading whitespace: {}", bytes)
+                } else {
+                    bytes.to_owned()
+                };
+                return Err(ConfigParseError {
                     origin: ConfigOrigin::File(src.to_owned()),
-                    line: Some(index + 1),
-                    bytes: bytes.to_owned(),
-                });
+                    line,
+                    message,
+                }
+                .into());
             }
         }
         if !current_layer.is_empty() {
@@ -159,8 +229,11 @@
     }
 }
 
-impl std::fmt::Debug for ConfigLayer {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl DisplayBytes for ConfigLayer {
+    fn display_bytes(
+        &self,
+        out: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
         let mut sections: Vec<_> = self.sections.iter().collect();
         sections.sort_by(|e0, e1| e0.0.cmp(e1.0));
 
@@ -169,16 +242,13 @@
             items.sort_by(|e0, e1| e0.0.cmp(e1.0));
 
             for (item, config_entry) in items {
-                writeln!(
-                    f,
-                    "{}",
-                    String::from_utf8_lossy(&format_bytes!(
-                        b"{}.{}={} # {}",
-                        section,
-                        item,
-                        &config_entry.bytes,
-                        &self.origin.to_bytes(),
-                    ))
+                write_bytes!(
+                    out,
+                    b"{}.{}={} # {}\n",
+                    section,
+                    item,
+                    &config_entry.bytes,
+                    &self.origin,
                 )?
             }
         }
@@ -205,9 +275,11 @@
 
 #[derive(Clone, Debug)]
 pub enum ConfigOrigin {
-    /// The value comes from a configuration file
+    /// From a configuration file
     File(PathBuf),
-    /// The value comes from the environment like `$PAGER` or `$EDITOR`
+    /// From a `--config` CLI argument
+    CommandLine,
+    /// From environment variables like `$PAGER` or `$EDITOR`
     Environment(Vec<u8>),
     /* TODO cli
      * TODO defaults (configitems.py)
@@ -216,53 +288,32 @@
      * Others? */
 }
 
-impl ConfigOrigin {
-    /// TODO use some kind of dedicated trait?
-    pub fn to_bytes(&self) -> Vec<u8> {
+impl DisplayBytes for ConfigOrigin {
+    fn display_bytes(
+        &self,
+        out: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
         match self {
-            ConfigOrigin::File(p) => get_bytes_from_path(p),
-            ConfigOrigin::Environment(e) => e.to_owned(),
+            ConfigOrigin::File(p) => out.write_all(&get_bytes_from_path(p)),
+            ConfigOrigin::CommandLine => out.write_all(b"--config"),
+            ConfigOrigin::Environment(e) => write_bytes!(out, b"${}", e),
         }
     }
 }
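+
+// A sketch of the resulting display, with hypothetical values:
+//   ConfigOrigin::File("/etc/hgrc")      displays as "/etc/hgrc"
+//   ConfigOrigin::CommandLine            displays as "--config"
+//   ConfigOrigin::Environment(b"PAGER")  displays as "$PAGER"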
 
 #[derive(Debug)]
-pub enum ConfigError {
-    Parse {
-        origin: ConfigOrigin,
-        line: Option<usize>,
-        bytes: Vec<u8>,
-    },
-    /// Failed to include a sub config file
-    IncludeError {
-        path: PathBuf,
-        io_error: std::io::Error,
-    },
-    /// Any IO error that isn't expected
-    IO(std::io::Error),
+pub struct ConfigParseError {
+    pub origin: ConfigOrigin,
+    pub line: Option<usize>,
+    pub message: Vec<u8>,
 }
 
-impl From<std::io::Error> for ConfigError {
-    fn from(e: std::io::Error) -> Self {
-        Self::IO(e)
-    }
+#[derive(Debug, derive_more::From)]
+pub enum ConfigError {
+    Parse(ConfigParseError),
+    Other(HgError),
 }
 
 fn make_regex(pattern: &'static str) -> Regex {
     Regex::new(pattern).expect("expected a valid regex")
 }
-
-/// Includes are relative to the file they're defined in, unless they're
-/// absolute.
-fn read_include(
-    old_src: &Path,
-    new_src: &Path,
-) -> (PathBuf, io::Result<Vec<u8>>) {
-    if new_src.is_absolute() {
-        (new_src.to_path_buf(), read_whole_file(&new_src))
-    } else {
-        let dir = old_src.parent().unwrap();
-        let new_src = dir.join(&new_src);
-        (new_src.to_owned(), read_whole_file(&new_src))
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/config/values.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,61 @@
+//! Parsing functions for various type of configuration values.
+//!
+//! Returning `None` indicates a syntax error. Using a `Result` would be more
+//! correct but would take more boilerplate for converting between error types,
+//! compared to using `.ok()` on inner results of various error types to
+//! convert them all to options. The `Config::get_parse` method later converts
+//! those options to results with `ConfigValueParseError`, which contains
+//! details about where the value came from (but omits details of what’s
+//! invalid inside the value).
+
+pub(super) fn parse_bool(v: &[u8]) -> Option<bool> {
+    match v.to_ascii_lowercase().as_slice() {
+        b"1" | b"yes" | b"true" | b"on" | b"always" => Some(true),
+        b"0" | b"no" | b"false" | b"off" | b"never" => Some(false),
+        _ => None,
+    }
+}
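+
+// An illustrative sketch of the spellings `parse_bool` accepts; these
+// assertions mirror the match arms above.
+#[test]
+fn test_parse_bool() {
+    assert_eq!(parse_bool(b"Yes"), Some(true));
+    assert_eq!(parse_bool(b"OFF"), Some(false));
+    assert_eq!(parse_bool(b"maybe"), None);
+}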
+
+pub(super) fn parse_byte_size(value: &[u8]) -> Option<u64> {
+    let value = std::str::from_utf8(value).ok()?.to_ascii_lowercase();
+    const UNITS: &[(&str, u64)] = &[
+        ("g", 1 << 30),
+        ("gb", 1 << 30),
+        ("m", 1 << 20),
+        ("mb", 1 << 20),
+        ("k", 1 << 10),
+        ("kb", 1 << 10),
+        ("b", 1 << 0), // Needs to be last
+    ];
+    for &(unit, multiplier) in UNITS {
+        // TODO: use `value.strip_suffix(unit)` when we require Rust 1.45+
+        if value.ends_with(unit) {
+            let value_before_unit = &value[..value.len() - unit.len()];
+            let float: f64 = value_before_unit.trim().parse().ok()?;
+            if float >= 0.0 {
+                return Some((float * multiplier as f64).round() as u64);
+            } else {
+                return None;
+            }
+        }
+    }
+    value.parse().ok()
+}
+
+#[test]
+fn test_parse_byte_size() {
+    assert_eq!(parse_byte_size(b""), None);
+    assert_eq!(parse_byte_size(b"b"), None);
+
+    assert_eq!(parse_byte_size(b"12"), Some(12));
+    assert_eq!(parse_byte_size(b"12b"), Some(12));
+    assert_eq!(parse_byte_size(b"12 b"), Some(12));
+    assert_eq!(parse_byte_size(b"12.1 b"), Some(12));
+    assert_eq!(parse_byte_size(b"1.1 K"), Some(1126));
+    assert_eq!(parse_byte_size(b"1.1 kB"), Some(1126));
+
+    assert_eq!(parse_byte_size(b"-12 b"), None);
+    assert_eq!(parse_byte_size(b"-0.1 b"), None);
+    assert_eq!(parse_byte_size(b"0.1 b"), Some(0));
+    assert_eq!(parse_byte_size(b"12.1 b"), Some(12));
+}
--- a/rust/hg-core/src/copy_tracing.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/copy_tracing.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,46 +1,121 @@
+#[cfg(test)]
+#[macro_use]
+mod tests_support;
+
+#[cfg(test)]
+mod tests;
+
 use crate::utils::hg_path::HgPath;
 use crate::utils::hg_path::HgPathBuf;
 use crate::Revision;
 use crate::NULL_REVISION;
 
-use im_rc::ordmap::DiffItem;
+use bytes_cast::{unaligned, BytesCast};
 use im_rc::ordmap::Entry;
 use im_rc::ordmap::OrdMap;
+use im_rc::OrdSet;
 
 use std::cmp::Ordering;
 use std::collections::HashMap;
-use std::convert::TryInto;
 
 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
 
 type PathToken = usize;
 
-#[derive(Clone, Debug, PartialEq, Copy)]
-struct TimeStampedPathCopy {
+#[derive(Clone, Debug)]
+struct CopySource {
     /// revision at which the copy information was added
     rev: Revision,
     /// the copy source, (Set to None in case of deletion of the associated
     /// key)
     path: Option<PathToken>,
+    /// a set of previous `CopySource.rev` values directly or indirectly
+    /// overwritten by this one.
+    overwritten: OrdSet<Revision>,
+}
+
+impl CopySource {
+    /// create a new CopySource
+    ///
+    /// Use this when no previous copy source existed.
+    fn new(rev: Revision, path: Option<PathToken>) -> Self {
+        Self {
+            rev,
+            path,
+            overwritten: OrdSet::new(),
+        }
+    }
+
+    /// create a new CopySource from merging two others
+    ///
+    /// Use this when merging two InternalPathCopies requires active merging of
+    /// some entries.
+    fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
+        let mut overwritten = OrdSet::new();
+        overwritten.extend(winner.overwritten.iter().copied());
+        overwritten.extend(loser.overwritten.iter().copied());
+        overwritten.insert(winner.rev);
+        overwritten.insert(loser.rev);
+        Self {
+            rev,
+            path: winner.path,
+            overwritten,
+        }
+    }
+
+    /// Update the value of a pre-existing CopySource
+    ///
+    /// Use this when recording copy information from parent → child edges
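+    ///
+    /// A sketch with hypothetical revisions and path tokens:
+    ///
+    /// ```ignore
+    /// let mut source = CopySource::new(2, Some(token));
+    /// source.overwrite(5, Some(other_token));
+    /// assert_eq!(source.rev, 5);
+    /// assert!(source.overwritten.contains(&2));
+    /// ```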
+    fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
+        self.overwritten.insert(self.rev);
+        self.rev = rev;
+        self.path = path;
+    }
+
+    /// Mark pre-existing copy information as "dropped" by a file deletion
+    ///
+    /// Use this when recording copy information from parent → child edges
+    fn mark_delete(&mut self, rev: Revision) {
+        self.overwritten.insert(self.rev);
+        self.rev = rev;
+        self.path = None;
+    }
+
+    /// Mark pre-existing copy information as "dropped" by a file deletion,
+    /// also folding in the overwrite history of the paired entry
+    ///
+    /// Use this when recording copy information from parent → child edges
+    fn mark_delete_with_pair(&mut self, rev: Revision, other: &Self) {
+        self.overwritten.insert(self.rev);
+        if other.rev != rev {
+            self.overwritten.insert(other.rev);
+        }
+        self.overwritten.extend(other.overwritten.iter().copied());
+        self.rev = rev;
+        self.path = None;
+    }
+
+    fn is_overwritten_by(&self, other: &Self) -> bool {
+        other.overwritten.contains(&self.rev)
+    }
+}
+
+// For the same "dest", content generated for a given revision will always be
+// the same.
+impl PartialEq for CopySource {
+    fn eq(&self, other: &Self) -> bool {
+        #[cfg(debug_assertions)]
+        {
+            if self.rev == other.rev {
+                debug_assert!(self.path == other.path);
+                debug_assert!(self.overwritten == other.overwritten);
+            }
+        }
+        self.rev == other.rev
+    }
 }
 
 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
-type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
-
-/// hold parent 1, parent 2 and relevant files actions.
-pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
-
-/// represent the files affected by a changesets
-///
-/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
-/// all the data categories tracked by it.
-/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
-/// all the data categories tracked by it.
-pub struct ChangedFiles<'a> {
-    nb_items: u32,
-    index: &'a [u8],
-    data: &'a [u8],
-}
+type InternalPathCopies = OrdMap<PathToken, CopySource>;
 
 /// Represent active changes that affect the copy tracing.
 enum Action<'a> {
@@ -51,7 +126,8 @@
     Removed(&'a HgPath),
     /// The parent → children edge introduces copy information between (dest,
     /// source)
-    Copied(&'a HgPath, &'a HgPath),
+    CopiedFromP1(&'a HgPath, &'a HgPath),
+    CopiedFromP2(&'a HgPath, &'a HgPath),
 }
 
 /// This expresses the possible "special" cases we can get in a merge
@@ -67,9 +143,6 @@
     Normal,
 }
 
-type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
-
-const EMPTY: &[u8] = b"";
 const COPY_MASK: u8 = 3;
 const P1_COPY: u8 = 2;
 const P2_COPY: u8 = 3;
@@ -78,142 +151,94 @@
 const MERGED: u8 = 8;
 const SALVAGED: u8 = 16;
 
-impl<'a> ChangedFiles<'a> {
-    const INDEX_START: usize = 4;
-    const ENTRY_SIZE: u32 = 9;
-    const FILENAME_START: u32 = 1;
-    const COPY_SOURCE_START: u32 = 5;
+#[derive(BytesCast)]
+#[repr(C)]
+struct ChangedFilesIndexEntry {
+    flags: u8,
 
-    pub fn new(data: &'a [u8]) -> Self {
-        assert!(
-            data.len() >= 4,
-            "data size ({}) is too small to contain the header (4)",
-            data.len()
-        );
-        let nb_items_raw: [u8; 4] = (&data[0..=3])
-            .try_into()
-            .expect("failed to turn 4 bytes into 4 bytes");
-        let nb_items = u32::from_be_bytes(nb_items_raw);
+    /// Only the end position is stored. The start is at the end of the
+    /// previous entry.
+    destination_path_end_position: unaligned::U32Be,
 
-        let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
-        let index_end = Self::INDEX_START + index_size;
+    source_index_entry_position: unaligned::U32Be,
+}
+
+fn _static_assert_size_of() {
+    let _ = std::mem::transmute::<ChangedFilesIndexEntry, [u8; 9]>;
+}
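+
+// A sketch of the binary layout consumed by `ChangedFiles::new`, as
+// inferred from the parsing code below:
+//
+//   [u32 BE: number of index entries N]
+//   [N × 9-byte ChangedFilesIndexEntry]
+//   [concatenated path bytes, delimited by the index end positions]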
 
-        assert!(
-            data.len() >= index_end,
-            "data size ({}) is too small to fit the index_data ({})",
-            data.len(),
-            index_end
-        );
+/// Represents the files affected by a changeset.
+///
+/// This holds a subset of `mercurial.metadata.ChangingFiles` as we do not need
+/// all the data categories tracked by it.
+pub struct ChangedFiles<'a> {
+    index: &'a [ChangedFilesIndexEntry],
+    paths: &'a [u8],
+}
 
-        let ret = ChangedFiles {
-            nb_items,
-            index: &data[Self::INDEX_START..index_end],
-            data: &data[index_end..],
-        };
-        let max_data = ret.filename_end(nb_items - 1) as usize;
-        assert!(
-            ret.data.len() >= max_data,
-            "data size ({}) is too small to fit all data ({})",
-            data.len(),
-            index_end + max_data
-        );
-        ret
+impl<'a> ChangedFiles<'a> {
+    pub fn new(data: &'a [u8]) -> Self {
+        let (header, rest) = unaligned::U32Be::from_bytes(data).unwrap();
+        let nb_index_entries = header.get() as usize;
+        let (index, paths) =
+            ChangedFilesIndexEntry::slice_from_bytes(rest, nb_index_entries)
+                .unwrap();
+        Self { index, paths }
     }
 
     pub fn new_empty() -> Self {
         ChangedFiles {
-            nb_items: 0,
-            index: EMPTY,
-            data: EMPTY,
+            index: &[],
+            paths: &[],
         }
     }
 
-    /// internal function to return an individual entry at a given index
-    fn entry(&'a self, idx: u32) -> FileChange<'a> {
-        if idx >= self.nb_items {
-            panic!(
-                "index for entry is higher that the number of file {} >= {}",
-                idx, self.nb_items
-            )
-        }
-        let flags = self.flags(idx);
-        let filename = self.filename(idx);
-        let copy_idx = self.copy_idx(idx);
-        let copy_source = self.filename(copy_idx);
-        (flags, filename, copy_source)
-    }
-
-    /// internal function to return the filename of the entry at a given index
-    fn filename(&self, idx: u32) -> &HgPath {
-        let filename_start;
-        if idx == 0 {
-            filename_start = 0;
+    /// Internal function to return the filename of the entry at a given index
+    fn path(&self, idx: usize) -> &HgPath {
+        let start = if idx == 0 {
+            0
         } else {
-            filename_start = self.filename_end(idx - 1)
-        }
-        let filename_end = self.filename_end(idx);
-        let filename_start = filename_start as usize;
-        let filename_end = filename_end as usize;
-        HgPath::new(&self.data[filename_start..filename_end])
-    }
-
-    /// internal function to return the flag field of the entry at a given
-    /// index
-    fn flags(&self, idx: u32) -> u8 {
-        let idx = idx as usize;
-        self.index[idx * (Self::ENTRY_SIZE as usize)]
-    }
-
-    /// internal function to return the end of a filename part at a given index
-    fn filename_end(&self, idx: u32) -> u32 {
-        let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
-        let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
-        let start = start as usize;
-        let end = end as usize;
-        let raw = (&self.index[start..end])
-            .try_into()
-            .expect("failed to turn 4 bytes into 4 bytes");
-        u32::from_be_bytes(raw)
-    }
-
-    /// internal function to return index of the copy source of the entry at a
-    /// given index
-    fn copy_idx(&self, idx: u32) -> u32 {
-        let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
-        let end = (idx + 1) * Self::ENTRY_SIZE;
-        let start = start as usize;
-        let end = end as usize;
-        let raw = (&self.index[start..end])
-            .try_into()
-            .expect("failed to turn 4 bytes into 4 bytes");
-        u32::from_be_bytes(raw)
+            self.index[idx - 1].destination_path_end_position.get() as usize
+        };
+        let end = self.index[idx].destination_path_end_position.get() as usize;
+        HgPath::new(&self.paths[start..end])
     }
 
     /// Return an iterator over all the `Action` in this instance.
-    fn iter_actions(&self, parent: Parent) -> ActionsIterator {
-        ActionsIterator {
-            changes: &self,
-            parent: parent,
-            current: 0,
-        }
+    fn iter_actions(&self) -> impl Iterator<Item = Action> {
+        self.index.iter().enumerate().flat_map(move |(idx, entry)| {
+            let path = self.path(idx);
+            if (entry.flags & ACTION_MASK) == REMOVED {
+                Some(Action::Removed(path))
+            } else if (entry.flags & COPY_MASK) == P1_COPY {
+                let source_idx =
+                    entry.source_index_entry_position.get() as usize;
+                Some(Action::CopiedFromP1(path, self.path(source_idx)))
+            } else if (entry.flags & COPY_MASK) == P2_COPY {
+                let source_idx =
+                    entry.source_index_entry_position.get() as usize;
+                Some(Action::CopiedFromP2(path, self.path(source_idx)))
+            } else {
+                None
+            }
+        })
     }
 
     /// return the MergeCase value associated with a filename
     fn get_merge_case(&self, path: &HgPath) -> MergeCase {
-        if self.nb_items == 0 {
+        if self.index.is_empty() {
             return MergeCase::Normal;
         }
         let mut low_part = 0;
-        let mut high_part = self.nb_items;
+        let mut high_part = self.index.len();
 
         while low_part < high_part {
             let cursor = (low_part + high_part - 1) / 2;
-            let (flags, filename, _source) = self.entry(cursor);
-            match path.cmp(filename) {
+            match path.cmp(self.path(cursor)) {
                 Ordering::Less => low_part = cursor + 1,
                 Ordering::Greater => high_part = cursor,
                 Ordering::Equal => {
-                    return match flags & ACTION_MASK {
+                    return match self.index[cursor].flags & ACTION_MASK {
                         MERGED => MergeCase::Merged,
                         SALVAGED => MergeCase::Salvaged,
                         _ => MergeCase::Normal,
@@ -225,100 +250,6 @@
     }
 }
 
-/// A struct responsible for answering "is X ancestors of Y" quickly
-///
-/// The structure will delegate ancestors call to a callback, and cache the
-/// result.
-#[derive(Debug)]
-struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
-    inner: &'a A,
-    pairs: HashMap<(Revision, Revision), bool>,
-}
-
-impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
-    fn new(func: &'a A) -> Self {
-        Self {
-            inner: func,
-            pairs: HashMap::default(),
-        }
-    }
-
-    fn record_overwrite(&mut self, anc: Revision, desc: Revision) {
-        self.pairs.insert((anc, desc), true);
-    }
-
-    /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
-    fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
-        if anc > desc {
-            false
-        } else if anc == desc {
-            true
-        } else {
-            if let Some(b) = self.pairs.get(&(anc, desc)) {
-                *b
-            } else {
-                let b = (self.inner)(anc, desc);
-                self.pairs.insert((anc, desc), b);
-                b
-            }
-        }
-    }
-}
-
-struct ActionsIterator<'a> {
-    changes: &'a ChangedFiles<'a>,
-    parent: Parent,
-    current: u32,
-}
-
-impl<'a> Iterator for ActionsIterator<'a> {
-    type Item = Action<'a>;
-
-    fn next(&mut self) -> Option<Action<'a>> {
-        let copy_flag = match self.parent {
-            Parent::FirstParent => P1_COPY,
-            Parent::SecondParent => P2_COPY,
-        };
-        while self.current < self.changes.nb_items {
-            let (flags, file, source) = self.changes.entry(self.current);
-            self.current += 1;
-            if (flags & ACTION_MASK) == REMOVED {
-                return Some(Action::Removed(file));
-            }
-            let copy = flags & COPY_MASK;
-            if copy == copy_flag {
-                return Some(Action::Copied(file, source));
-            }
-        }
-        return None;
-    }
-}
-
-/// A small struct whose purpose is to ensure lifetime of bytes referenced in
-/// ChangedFiles
-///
-/// It is passed to the RevInfoMaker callback who can assign any necessary
-/// content to the `data` attribute. The copy tracing code is responsible for
-/// keeping the DataHolder alive at least as long as the ChangedFiles object.
-pub struct DataHolder<D> {
-    /// RevInfoMaker callback should assign data referenced by the
-    /// ChangedFiles struct it return to this attribute. The DataHolder
-    /// lifetime will be at least as long as the ChangedFiles one.
-    pub data: Option<D>,
-}
-
-pub type RevInfoMaker<'a, D> =
-    Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
-
-/// enum used to carry information about the parent → child currently processed
-#[derive(Copy, Clone, Debug)]
-enum Parent {
-    /// The `p1(x) → x` edge
-    FirstParent,
-    /// The `p2(x) → x` edge
-    SecondParent,
-}
-
 /// A small "tokenizer" responsible of turning full HgPath into lighter
 /// PathToken
 ///
@@ -345,123 +276,110 @@
     }
 
     fn untokenize(&self, token: PathToken) -> &HgPathBuf {
-        assert!(token < self.path.len(), format!("Unknown token: {}", token));
+        assert!(token < self.path.len(), "Unknown token: {}", token);
         &self.path[token]
     }
 }
 
 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
-///
-/// Arguments are:
-///
-/// revs: all revisions to be considered
-/// children: a {parent → [childrens]} mapping
-/// target_rev: the final revision we are combining copies to
-/// rev_info(rev): callback to get revision information:
-///   * first parent
-///   * second parent
-///   * ChangedFiles
-/// isancestors(low_rev, high_rev): callback to check if a revision is an
-///                                 ancestor of another
-pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
-    revs: Vec<Revision>,
-    mut children_count: HashMap<Revision, usize>,
-    target_rev: Revision,
-    rev_info: RevInfoMaker<D>,
-    is_ancestor: &A,
-) -> PathCopies {
-    let mut all_copies = HashMap::new();
-    let mut oracle = AncestorOracle::new(is_ancestor);
-
-    let mut path_map = TwoWayPathMap::default();
-
-    for rev in revs {
-        let mut d: DataHolder<D> = DataHolder { data: None };
-        let (p1, p2, changes) = rev_info(rev, &mut d);
+pub struct CombineChangesetCopies {
+    all_copies: HashMap<Revision, InternalPathCopies>,
+    path_map: TwoWayPathMap,
+    children_count: HashMap<Revision, usize>,
+}
 
-        // We will chain the copies information accumulated for the parent with
-        // the individual copies information the curent revision.  Creating a
-        // new TimeStampedPath for each `rev` → `children` vertex.
-        let mut copies: Option<TimeStampedPathCopies> = None;
-        if p1 != NULL_REVISION {
-            // Retrieve data computed in a previous iteration
-            let parent_copies = get_and_clean_parent_copies(
-                &mut all_copies,
-                &mut children_count,
-                p1,
-            );
-            if let Some(parent_copies) = parent_copies {
-                // combine it with data for that revision
-                let vertex_copies = add_from_changes(
-                    &mut path_map,
-                    &mut oracle,
-                    &parent_copies,
-                    &changes,
-                    Parent::FirstParent,
-                    rev,
-                );
-                // keep that data around for potential later combination
-                copies = Some(vertex_copies);
-            }
-        }
-        if p2 != NULL_REVISION {
-            // Retrieve data computed in a previous iteration
-            let parent_copies = get_and_clean_parent_copies(
-                &mut all_copies,
-                &mut children_count,
-                p2,
-            );
-            if let Some(parent_copies) = parent_copies {
-                // combine it with data for that revision
-                let vertex_copies = add_from_changes(
-                    &mut path_map,
-                    &mut oracle,
-                    &parent_copies,
-                    &changes,
-                    Parent::SecondParent,
-                    rev,
-                );
-
-                copies = match copies {
-                    None => Some(vertex_copies),
-                    // Merge has two parents needs to combines their copy
-                    // information.
-                    //
-                    // If we got data from both parents, We need to combine
-                    // them.
-                    Some(copies) => Some(merge_copies_dict(
-                        &path_map,
-                        rev,
-                        vertex_copies,
-                        copies,
-                        &changes,
-                        &mut oracle,
-                    )),
-                };
-            }
-        }
-        match copies {
-            Some(copies) => {
-                all_copies.insert(rev, copies);
-            }
-            _ => {}
+impl CombineChangesetCopies {
+    pub fn new(children_count: HashMap<Revision, usize>) -> Self {
+        Self {
+            all_copies: HashMap::new(),
+            path_map: TwoWayPathMap::default(),
+            children_count,
         }
     }
 
-    // Drop internal information (like the timestamp) and return the final
-    // mapping.
-    let tt_result = all_copies
-        .remove(&target_rev)
-        .expect("target revision was not processed");
-    let mut result = PathCopies::default();
-    for (dest, tt_source) in tt_result {
-        if let Some(path) = tt_source.path {
-            let path_dest = path_map.untokenize(dest).to_owned();
-            let path_path = path_map.untokenize(path).to_owned();
-            result.insert(path_dest, path_path);
+    /// Combine the given `changes` data specific to `rev` with the data
+    /// previously given for its parents (and transitively, its ancestors).
+    pub fn add_revision(
+        &mut self,
+        rev: Revision,
+        p1: Revision,
+        p2: Revision,
+        changes: ChangedFiles<'_>,
+    ) {
+        self.add_revision_inner(rev, p1, p2, changes.iter_actions(), |path| {
+            changes.get_merge_case(path)
+        })
+    }
+
+    /// Separated out from `add_revision` so that unit tests can call this
+    /// without synthesizing a `ChangedFiles` in binary format.
+    fn add_revision_inner<'a>(
+        &mut self,
+        rev: Revision,
+        p1: Revision,
+        p2: Revision,
+        copy_actions: impl Iterator<Item = Action<'a>>,
+        get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+    ) {
+        // Retrieve data computed in a previous iteration
+        let p1_copies = match p1 {
+            NULL_REVISION => None,
+            _ => get_and_clean_parent_copies(
+                &mut self.all_copies,
+                &mut self.children_count,
+                p1,
+            ), // will be None if the vertex is not to be traversed
+        };
+        let p2_copies = match p2 {
+            NULL_REVISION => None,
+            _ => get_and_clean_parent_copies(
+                &mut self.all_copies,
+                &mut self.children_count,
+                p2,
+            ), // will be None if the vertex is not to be traversed
+        };
+        // combine it with data for that revision
+        let (p1_copies, p2_copies) = chain_changes(
+            &mut self.path_map,
+            p1_copies,
+            p2_copies,
+            copy_actions,
+            rev,
+        );
+        let copies = match (p1_copies, p2_copies) {
+            (None, None) => None,
+            (c, None) => c,
+            (None, c) => c,
+            (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
+                &self.path_map,
+                rev,
+                p2_copies,
+                p1_copies,
+                get_merge_case,
+            )),
+        };
+        if let Some(c) = copies {
+            self.all_copies.insert(rev, c);
         }
     }
-    result
+
+    /// Drop intermediate data (such as which revision a copy was from) and
+    /// return the final mapping.
+    pub fn finish(mut self, target_rev: Revision) -> PathCopies {
+        let tt_result = self
+            .all_copies
+            .remove(&target_rev)
+            .expect("target revision was not processed");
+        let mut result = PathCopies::default();
+        for (dest, tt_source) in tt_result {
+            if let Some(path) = tt_source.path {
+                let path_dest = self.path_map.untokenize(dest).to_owned();
+                let path_path = self.path_map.untokenize(path).to_owned();
+                result.insert(path_dest, path_path);
+            }
+        }
+        result
+    }
 }
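+
+// A sketch of the intended call sequence, with hypothetical revisions and a
+// hypothetical `changed_files(rev)` helper that supplies `ChangedFiles`:
+//
+//   let mut combine = CombineChangesetCopies::new(children_count);
+//   for rev in revs_in_topological_order {
+//       combine.add_revision(rev, p1, p2, changed_files(rev));
+//   }
+//   let copies: PathCopies = combine.finish(target_rev);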
 
 /// fetch previously computed information
@@ -471,68 +389,67 @@
 ///
 /// If parent is not part of the set we are expected to walk, return None.
 fn get_and_clean_parent_copies(
-    all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
+    all_copies: &mut HashMap<Revision, InternalPathCopies>,
     children_count: &mut HashMap<Revision, usize>,
     parent_rev: Revision,
-) -> Option<TimeStampedPathCopies> {
+) -> Option<InternalPathCopies> {
     let count = children_count.get_mut(&parent_rev)?;
     *count -= 1;
     if *count == 0 {
         match all_copies.remove(&parent_rev) {
             Some(c) => Some(c),
-            None => Some(TimeStampedPathCopies::default()),
+            None => Some(InternalPathCopies::default()),
         }
     } else {
         match all_copies.get(&parent_rev) {
             Some(c) => Some(c.clone()),
-            None => Some(TimeStampedPathCopies::default()),
+            None => Some(InternalPathCopies::default()),
         }
     }
 }
 
 /// Combine ChangedFiles with some existing PathCopies information and return
 /// the result
-fn add_from_changes<A: Fn(Revision, Revision) -> bool>(
+fn chain_changes<'a>(
     path_map: &mut TwoWayPathMap,
-    oracle: &mut AncestorOracle<A>,
-    base_copies: &TimeStampedPathCopies,
-    changes: &ChangedFiles,
-    parent: Parent,
+    base_p1_copies: Option<InternalPathCopies>,
+    base_p2_copies: Option<InternalPathCopies>,
+    copy_actions: impl Iterator<Item = Action<'a>>,
     current_rev: Revision,
-) -> TimeStampedPathCopies {
-    let mut copies = base_copies.clone();
-    for action in changes.iter_actions(parent) {
+) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
+    // Fast path the "nothing to do" case.
+    if let (None, None) = (&base_p1_copies, &base_p2_copies) {
+        return (None, None);
+    }
+
+    let mut p1_copies = base_p1_copies.clone();
+    let mut p2_copies = base_p2_copies.clone();
+    for action in copy_actions {
         match action {
-            Action::Copied(path_dest, path_source) => {
-                let dest = path_map.tokenize(path_dest);
-                let source = path_map.tokenize(path_source);
-                let entry;
-                if let Some(v) = base_copies.get(&source) {
-                    entry = match &v.path {
-                        Some(path) => Some((*(path)).to_owned()),
-                        None => Some(source.to_owned()),
-                    }
-                } else {
-                    entry = Some(source.to_owned());
+            Action::CopiedFromP1(path_dest, path_source) => {
+                match &mut p1_copies {
+                    None => (), // This is not a vertex we should process.
+                    Some(copies) => add_one_copy(
+                        current_rev,
+                        path_map,
+                        copies,
+                        base_p1_copies.as_ref().unwrap(),
+                        path_dest,
+                        path_source,
+                    ),
                 }
-                // Each new entry is introduced by the children, we
-                // record this information as we will need it to take
-                // the right decision when merging conflicting copy
-                // information. See merge_copies_dict for details.
-                match copies.entry(dest) {
-                    Entry::Vacant(slot) => {
-                        let ttpc = TimeStampedPathCopy {
-                            rev: current_rev,
-                            path: entry,
-                        };
-                        slot.insert(ttpc);
-                    }
-                    Entry::Occupied(mut slot) => {
-                        let mut ttpc = slot.get_mut();
-                        oracle.record_overwrite(ttpc.rev, current_rev);
-                        ttpc.rev = current_rev;
-                        ttpc.path = entry;
-                    }
+            }
+            Action::CopiedFromP2(path_dest, path_source) => {
+                match &mut p2_copies {
+                    None => (), // This is not a vertex we should process.
+                    Some(copies) => add_one_copy(
+                        current_rev,
+                        path_map,
+                        copies,
+                        base_p2_copies.as_ref().unwrap(),
+                        path_dest,
+                        path_source,
+                    ),
                 }
             }
             Action::Removed(deleted_path) => {
@@ -540,164 +457,131 @@
                 //
                 // We need to explicitly record them as dropped to
                 // propagate this information when merging two
-                // TimeStampedPathCopies object.
+                // InternalPathCopies object.
                 let deleted = path_map.tokenize(deleted_path);
-                copies.entry(deleted).and_modify(|old| {
-                    oracle.record_overwrite(old.rev, current_rev);
-                    old.rev = current_rev;
-                    old.path = None;
-                });
+
+                let p1_entry = match &mut p1_copies {
+                    None => None,
+                    Some(copies) => match copies.entry(deleted) {
+                        Entry::Occupied(e) => Some(e),
+                        Entry::Vacant(_) => None,
+                    },
+                };
+                let p2_entry = match &mut p2_copies {
+                    None => None,
+                    Some(copies) => match copies.entry(deleted) {
+                        Entry::Occupied(e) => Some(e),
+                        Entry::Vacant(_) => None,
+                    },
+                };
+
+                match (p1_entry, p2_entry) {
+                    (None, None) => (),
+                    (Some(mut e), None) => {
+                        e.get_mut().mark_delete(current_rev)
+                    }
+                    (None, Some(mut e)) => {
+                        e.get_mut().mark_delete(current_rev)
+                    }
+                    (Some(mut e1), Some(mut e2)) => {
+                        let cs1 = e1.get_mut();
+                        let cs2 = e2.get();
+                        if cs1 == cs2 {
+                            cs1.mark_delete(current_rev);
+                        } else {
+                            cs1.mark_delete_with_pair(current_rev, &cs2);
+                        }
+                        e2.insert(cs1.clone());
+                    }
+                }
             }
         }
     }
-    copies
+    (p1_copies, p2_copies)
+}
+
+// Insert one new piece of copy information into an InternalPathCopies.
+//
+// This deals with chaining and overwrites.
+fn add_one_copy(
+    current_rev: Revision,
+    path_map: &mut TwoWayPathMap,
+    copies: &mut InternalPathCopies,
+    base_copies: &InternalPathCopies,
+    path_dest: &HgPath,
+    path_source: &HgPath,
+) {
+    let dest = path_map.tokenize(path_dest);
+    let source = path_map.tokenize(path_source);
+    let entry;
+    if let Some(v) = base_copies.get(&source) {
+        entry = match &v.path {
+            Some(path) => Some((*(path)).to_owned()),
+            None => Some(source.to_owned()),
+        }
+    } else {
+        entry = Some(source.to_owned());
+    }
+    // Each new entry is introduced by the children, we
+    // record this information as we will need it to take
+    // the right decision when merging conflicting copy
+    // information. See merge_copies_dict for details.
+    match copies.entry(dest) {
+        Entry::Vacant(slot) => {
+            let ttpc = CopySource::new(current_rev, entry);
+            slot.insert(ttpc);
+        }
+        Entry::Occupied(mut slot) => {
+            let ttpc = slot.get_mut();
+            ttpc.overwrite(current_rev, entry);
+        }
+    }
 }
 
 /// merge two copies-mapping together, minor and major
 ///
 /// In case of conflict, value from "major" will be picked, unless in some
 /// cases. See inline documentation for details.
-fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
+fn merge_copies_dict(
     path_map: &TwoWayPathMap,
     current_merge: Revision,
-    mut minor: TimeStampedPathCopies,
-    mut major: TimeStampedPathCopies,
-    changes: &ChangedFiles,
-    oracle: &mut AncestorOracle<A>,
-) -> TimeStampedPathCopies {
-    // This closure exist as temporary help while multiple developper are
-    // actively working on this code. Feel free to re-inline it once this
-    // code is more settled.
-    let mut cmp_value =
-        |dest: &PathToken,
-         src_minor: &TimeStampedPathCopy,
-         src_major: &TimeStampedPathCopy| {
-            compare_value(
-                path_map,
-                current_merge,
-                changes,
-                oracle,
-                dest,
-                src_minor,
-                src_major,
-            )
-        };
-    if minor.is_empty() {
-        major
-    } else if major.is_empty() {
-        minor
-    } else if minor.len() * 2 < major.len() {
-        // Lets says we are merging two TimeStampedPathCopies instance A and B.
-        //
-        // If A contains N items, the merge result will never contains more
-        // than N values differents than the one in A
-        //
-        // If B contains M items, with M > N, the merge result will always
-        // result in a minimum of M - N value differents than the on in
-        // A
-        //
-        // As a result, if N < (M-N), we know that simply iterating over A will
-        // yield less difference than iterating over the difference
-        // between A and B.
-        //
-        // This help performance a lot in case were a tiny
-        // TimeStampedPathCopies is merged with a much larger one.
-        for (dest, src_minor) in minor {
-            let src_major = major.get(&dest);
-            match src_major {
-                None => major.insert(dest, src_minor),
-                Some(src_major) => {
-                    match cmp_value(&dest, &src_minor, src_major) {
-                        MergePick::Any | MergePick::Major => None,
-                        MergePick::Minor => major.insert(dest, src_minor),
-                    }
-                }
+    minor: InternalPathCopies,
+    major: InternalPathCopies,
+    get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+) -> InternalPathCopies {
+    use crate::utils::{ordmap_union_with_merge, MergeResult};
+
+    ordmap_union_with_merge(minor, major, |&dest, src_minor, src_major| {
+        let (pick, overwrite) = compare_value(
+            current_merge,
+            || get_merge_case(path_map.untokenize(dest)),
+            src_minor,
+            src_major,
+        );
+        if overwrite {
+            let (winner, loser) = match pick {
+                MergePick::Major | MergePick::Any => (src_major, src_minor),
+                MergePick::Minor => (src_minor, src_major),
             };
-        }
-        major
-    } else if major.len() * 2 < minor.len() {
-        // This use the same rational than the previous block.
-        // (Check previous block documentation for details.)
-        for (dest, src_major) in major {
-            let src_minor = minor.get(&dest);
-            match src_minor {
-                None => minor.insert(dest, src_major),
-                Some(src_minor) => {
-                    match cmp_value(&dest, src_minor, &src_major) {
-                        MergePick::Any | MergePick::Minor => None,
-                        MergePick::Major => minor.insert(dest, src_major),
-                    }
+            MergeResult::UseNewValue(CopySource::new_from_merge(
+                current_merge,
+                winner,
+                loser,
+            ))
+        } else {
+            match pick {
+                MergePick::Any | MergePick::Major => {
+                    MergeResult::UseRightValue
                 }
-            };
-        }
-        minor
-    } else {
-        let mut override_minor = Vec::new();
-        let mut override_major = Vec::new();
-
-        let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
-            override_major.push((k.clone(), v.clone()))
-        };
-        let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
-            override_minor.push((k.clone(), v.clone()))
-        };
-
-        // The diff function leverage detection of the identical subpart if
-        // minor and major has some common ancestors. This make it very
-        // fast is most case.
-        //
-        // In case where the two map are vastly different in size, the current
-        // approach is still slowish because the iteration will iterate over
-        // all the "exclusive" content of the larger on. This situation can be
-        // frequent when the subgraph of revision we are processing has a lot
-        // of roots. Each roots adding they own fully new map to the mix (and
-        // likely a small map, if the path from the root to the "main path" is
-        // small.
-        //
-        // We could do better by detecting such situation and processing them
-        // differently.
-        for d in minor.diff(&major) {
-            match d {
-                DiffItem::Add(k, v) => to_minor(k, v),
-                DiffItem::Remove(k, v) => to_major(k, v),
-                DiffItem::Update { old, new } => {
-                    let (dest, src_major) = new;
-                    let (_, src_minor) = old;
-                    match cmp_value(dest, src_minor, src_major) {
-                        MergePick::Major => to_minor(dest, src_major),
-                        MergePick::Minor => to_major(dest, src_minor),
-                        // If the two entry are identical, no need to do
-                        // anything (but diff should not have yield them)
-                        MergePick::Any => unreachable!(),
-                    }
-                }
-            };
-        }
-
-        let updates;
-        let mut result;
-        if override_major.is_empty() {
-            result = major
-        } else if override_minor.is_empty() {
-            result = minor
-        } else {
-            if override_minor.len() < override_major.len() {
-                updates = override_minor;
-                result = minor;
-            } else {
-                updates = override_major;
-                result = major;
-            }
-            for (k, v) in updates {
-                result.insert(k, v);
+                MergePick::Minor => MergeResult::UseLeftValue,
             }
         }
-        result
-    }
+    })
 }
 
 /// represent the side that should prevail when merging two
-/// TimeStampedPathCopies
+/// InternalPathCopies
+#[derive(Debug, PartialEq)]
 enum MergePick {
     /// The "major" (p1) side prevails
     Major,
@@ -709,89 +593,88 @@
 
 /// decide which side prevails in case of conflicting values
 #[allow(clippy::if_same_then_else)]
-fn compare_value<A: Fn(Revision, Revision) -> bool>(
-    path_map: &TwoWayPathMap,
+fn compare_value(
     current_merge: Revision,
-    changes: &ChangedFiles,
-    oracle: &mut AncestorOracle<A>,
-    dest: &PathToken,
-    src_minor: &TimeStampedPathCopy,
-    src_major: &TimeStampedPathCopy,
-) -> MergePick {
-    if src_major.rev == current_merge {
-        if src_minor.rev == current_merge {
-            if src_major.path.is_none() {
-                // We cannot get different copy information for both p1 and p2
-                // from the same revision. Unless this was a
-                // deletion
-                MergePick::Any
-            } else {
-                unreachable!();
-            }
-        } else {
-            // The last value comes the current merge, this value -will- win
-            // eventually.
-            oracle.record_overwrite(src_minor.rev, src_major.rev);
-            MergePick::Major
-        }
+    merge_case_for_dest: impl Fn() -> MergeCase,
+    src_minor: &CopySource,
+    src_major: &CopySource,
+) -> (MergePick, bool) {
+    if src_major == src_minor {
+        (MergePick::Any, false)
+    } else if src_major.rev == current_merge {
+        // minor is different, per the src_major == src_minor check earlier
+        debug_assert!(src_minor.rev != current_merge);
+
+        // The last value comes from the current merge, this value -will- win
+        // eventually.
+        (MergePick::Major, true)
     } else if src_minor.rev == current_merge {
         // The last value comes from the current merge, this value -will- win
         // eventually.
-        oracle.record_overwrite(src_major.rev, src_minor.rev);
-        MergePick::Minor
+        (MergePick::Minor, true)
     } else if src_major.path == src_minor.path {
+        debug_assert!(src_major.rev != src_minor.rev);
         // we have the same value, but from other source;
-        if src_major.rev == src_minor.rev {
-            // If the two entry are identical, they are both valid
-            MergePick::Any
-        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
-            MergePick::Minor
+        if src_major.is_overwritten_by(src_minor) {
+            (MergePick::Minor, false)
+        } else if src_minor.is_overwritten_by(src_major) {
+            (MergePick::Major, false)
         } else {
-            MergePick::Major
+            (MergePick::Any, true)
         }
-    } else if src_major.rev == src_minor.rev {
-        // We cannot get copy information for both p1 and p2 in the
-        // same rev. So this is the same value.
-        unreachable!(
-            "conflict information from p1 and p2 in the same revision"
-        );
     } else {
-        let dest_path = path_map.untokenize(*dest);
-        let action = changes.get_merge_case(dest_path);
-        if src_major.path.is_none() && action == MergeCase::Salvaged {
+        debug_assert!(src_major.rev != src_minor.rev);
+        let action = merge_case_for_dest();
+        if src_minor.path.is_some()
+            && src_major.path.is_none()
+            && action == MergeCase::Salvaged
+        {
             // If the file is "deleted" in the major side but was
             // salvaged by the merge, we keep the minor side alive
-            MergePick::Minor
-        } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
+            (MergePick::Minor, true)
+        } else if src_major.path.is_some()
+            && src_minor.path.is_none()
+            && action == MergeCase::Salvaged
+        {
             // If the file is "deleted" in the minor side but was
             // salvaged by the merge, unconditionally preserve the
             // major side.
-            MergePick::Major
-        } else if action == MergeCase::Merged {
-            // If the file was actively merged, copy information
-            // from each side might conflict.  The major side will
-            // win such conflict.
-            MergePick::Major
-        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
-            // If the minor side is strictly newer than the major
-            // side, it should be kept.
-            MergePick::Minor
-        } else if src_major.path.is_some() {
-            // without any special case, the "major" value win
-            // other the "minor" one.
-            MergePick::Major
-        } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
-            // the "major" rev is a direct ancestors of "minor",
-            // any different value should
-            // overwrite
-            MergePick::Major
+            (MergePick::Major, true)
+        } else if src_minor.is_overwritten_by(src_major) {
+            // The information from the minor version are strictly older than
+            // the major version
+            if action == MergeCase::Merged {
+                // If the file was actively merged, it means some non-copy
+                // activity happened on the other branch, so the older copy
+                // information is still relevant.
+                //
+                // The major side wins such a conflict.
+                (MergePick::Major, true)
+            } else {
+                // No activity on the minor branch, pick the newer one.
+                (MergePick::Major, false)
+            }
+        } else if src_major.is_overwritten_by(src_minor) {
+            if action == MergeCase::Merged {
+                // If the file was actively merged, it means some non-copy
+                // activity happened on the other branch, so the older copy
+                // information is still relevant.
+                //
+                // The major side wins such a conflict.
+                (MergePick::Major, true)
+            } else {
+                // No activity on the minor branch, pick the newer one.
+                (MergePick::Minor, false)
+            }
+        } else if src_minor.path.is_none() {
+            // the minor side has no relevant information, pick the alive one
+            (MergePick::Major, true)
+        } else if src_major.path.is_none() {
+            // the major side has no relevant information, pick the alive one
+            (MergePick::Minor, true)
         } else {
-            // major version is None (so the file was deleted on
-            // that branch) and that branch is independant (neither
-            // minor nor major is an ancestors of the other one.)
-            // We preserve the new
-            // information about the new file.
-            MergePick::Minor
+            // by default the major side wins
+            (MergePick::Major, true)
         }
     }
 }
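
The branching above hinges on the `is_overwritten_by` relation between the two `CopySource` values. A minimal sketch of that relation, assuming the `(rev, path, overwritten)` shape described by the new tests below; this is an illustration, not necessarily the exact upstream definition:

```rust
use im_rc::OrdSet;

// Assumed shapes, following the tuples described in the test comments.
type Revision = i32;
type PathToken = usize;

struct CopySource {
    rev: Revision,
    path: Option<PathToken>,
    overwritten: OrdSet<Revision>,
}

impl CopySource {
    // `self` is overwritten by `other` if `other` recorded `self.rev`
    // in its set of replaced revisions.
    fn is_overwritten_by(&self, other: &Self) -> bool {
        other.overwritten.contains(&self.rev)
    }
}
```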
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/copy_tracing/tests.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,141 @@
+use super::*;
+
+/// Unit tests for:
+///
+/// ```ignore
+/// fn compare_value(
+///     current_merge: Revision,
+///     merge_case_for_dest: impl Fn() -> MergeCase,
+///     src_minor: &CopySource,
+///     src_major: &CopySource,
+/// ) -> (MergePick, /* overwrite: */ bool)
+/// ```
+#[test]
+fn test_compare_value() {
+    // The `compare_value!` macro calls the `compare_value` function with
+    // arguments given in pseudo-syntax:
+    //
+    // * For `merge_case_for_dest` it takes a plain `MergeCase` value instead
+    //   of a closure.
+    // * `CopySource` values are represented as `(rev, path, overwritten)`
+    //   tuples of type `(Revision, Option<PathToken>, OrdSet<Revision>)`.
+    // * `PathToken` is an integer not read by `compare_value`. It only checks
+    //   for `Some(_)` indicating a file copy vs. `None` for a file deletion.
+    // * `OrdSet<Revision>` is represented as a Python-like set literal.
+
+    use MergeCase::*;
+    use MergePick::*;
+
+    assert_eq!(
+        compare_value!(1, Normal, (1, None, { 1 }), (1, None, { 1 })),
+        (Any, false)
+    );
+}
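
For reference, the assertion above roughly expands, through the `compare_value!` and `copy_source!` macros defined in `tests_support.rs` below, to a direct call (expansion sketch):

```rust
// `{ 1 }` becomes an OrdSet<Revision> built by the `set!` macro.
let minor = copy_source!(1, None, { 1 });
let major = copy_source!(1, None, { 1 });
assert_eq!(
    compare_value(1, || MergeCase::Normal, &minor, &major),
    (MergePick::Any, false)
);
```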
+
+/// Unit tests for:
+///
+/// ```ignore
+/// fn merge_copies_dict(
+///     path_map: &TwoWayPathMap, // Not visible in test cases
+///     current_merge: Revision,
+///     minor: InternalPathCopies,
+///     major: InternalPathCopies,
+///     get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+/// ) -> InternalPathCopies
+/// ```
+#[test]
+fn test_merge_copies_dict() {
+    // The `merge_copies_dict!` macro calls the `merge_copies_dict` function
+    // with arguments given in pseudo-syntax:
+    //
+    // * `TwoWayPathMap` and path tokenization are implicitly taken care of.
+    //   All paths are given as string literals.
+    // * Key-value maps are represented with `{key1 => value1, key2 => value2}`
+    //   pseudo-syntax.
+    // * `InternalPathCopies` is a map of copy destination path keys to
+    //   `CopySource` values.
+    //   - `CopySource` is represented as a `(rev, source_path, overwritten)`
+    //     tuple of type `(Revision, Option<Path>, OrdSet<Revision>)`.
+    //   - Unlike in `test_compare_value`, source paths are string literals.
+    //   - `OrdSet<Revision>` is again represented as a Python-like set
+    //     literal.
+    // * `get_merge_case` is represented as a map of copy destination path to
+    //   `MergeCase`. The default for paths not in the map is
+    //   `MergeCase::Normal`.
+    //
+    // `internal_path_copies!` creates an `InternalPathCopies` value with the
+    // same pseudo-syntax as in `merge_copies_dict!`.
+
+    use MergeCase::*;
+
+    assert_eq!(
+        merge_copies_dict!(
+            1,
+            {"foo" => (1, None, {})},
+            {},
+            {"foo" => Merged}
+        ),
+        internal_path_copies!("foo" => (1, None, {}))
+    );
+}
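
Spelled out without the macros, this case says: the minor side records `foo` as created at revision 1 with no copy source, the major side has no information, and the merge actively touched `foo`. A hedged expansion sketch, assuming `InternalPathCopies` is an ordered map from path tokens to `CopySource`:

```rust
// Hypothetical expansion; field and type names follow the test comments.
let mut map = TwoWayPathMap::default();
let foo = map.tokenize(HgPath::new("foo"));
let mut minor = InternalPathCopies::new();
minor.insert(
    foo,
    CopySource { rev: 1, path: None, overwritten: OrdSet::new() },
);
let major = InternalPathCopies::new();
let merged = merge_copies_dict(&map, 1, minor, major, |path| {
    if path == HgPath::new("foo") {
        MergeCase::Merged
    } else {
        MergeCase::Normal
    }
});
// The minor-side entry survives the merge.
assert!(merged.contains_key(&foo));
```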
+
+/// Unit tests for:
+///
+/// ```ignore
+/// impl CombineChangesetCopies {
+///     fn new(children_count: HashMap<Revision, usize>) -> Self
+///
+///     // Called repeatedly:
+///     fn add_revision_inner<'a>(
+///         &mut self,
+///         rev: Revision,
+///         p1: Revision,
+///         p2: Revision,
+///         copy_actions: impl Iterator<Item = Action<'a>>,
+///         get_merge_case: impl Fn(&HgPath) -> MergeCase + Copy,
+///     )
+///
+///     fn finish(mut self, target_rev: Revision) -> PathCopies
+/// }
+/// ```
+#[test]
+fn test_combine_changeset_copies() {
+    // `combine_changeset_copies!` creates a `CombineChangesetCopies` with
+    // `new`, then calls `add_revision_inner` repeatedly, then calls `finish`
+    // for its return value.
+    //
+    // All paths are given as string literals.
+    //
+    // * Key-value maps are represented with `{key1 => value1, key2 => value2}`
+    //   pseudo-syntax.
+    // * `children_count` is a map of revision numbers to count of children in
+    //   the DAG. It includes all revisions that should be considered by the
+    //   algorithm.
+    // * Calls to `add_revision_inner` are represented as an array of anonymous
+    //   structs with named fields, one pseudo-struct per call.
+    //
+    // `path_copies!` creates a `PathCopies` value, a map of copy destination
+    // keys to copy source values. Note: the arrows for map literal syntax
+    // point **backwards** compared to the logical direction of copy!
+
+    use crate::NULL_REVISION as NULL;
+    use Action::*;
+    use MergeCase::*;
+
+    assert_eq!(
+        combine_changeset_copies!(
+            { 1 => 1, 2 => 1 },
+            [
+                { rev: 1, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
+                { rev: 2, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
+                {
+                    rev: 3, p1: 1, p2: 2,
+                    actions: [CopiedFromP1("destination.txt", "source.txt")],
+                    merge_cases: {"destination.txt" => Merged},
+                },
+            ],
+            3,
+        ),
+        path_copies!("destination.txt" => "source.txt")
+    );
+}
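
A rough expansion sketch of the `combine_changeset_copies!` call above, using the names the test imports (`NULL`, `Normal`, `Merged`); the local variable names are illustrative:

```rust
use std::collections::HashMap;

let mut children_count = HashMap::new();
children_count.insert(1, 1);
children_count.insert(2, 1);
let mut combine = CombineChangesetCopies::new(children_count);
// Empty action lists need an explicit element type.
let no_actions = || Vec::<Action>::new().into_iter();
combine.add_revision_inner(1, NULL, NULL, no_actions(), |_| Normal);
combine.add_revision_inner(2, NULL, NULL, no_actions(), |_| Normal);
combine.add_revision_inner(
    3,
    1,
    2,
    vec![CopiedFromP1(
        HgPath::new("destination.txt"),
        HgPath::new("source.txt"),
    )]
    .into_iter(),
    |path| {
        if path == HgPath::new("destination.txt") {
            Merged
        } else {
            Normal
        }
    },
);
assert_eq!(
    combine.finish(3),
    path_copies!("destination.txt" => "source.txt")
);
```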
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/copy_tracing/tests_support.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,199 @@
+//! Supporting macros for `tests.rs` in the same directory.
+//! See comments there for usage.
+
+/// Python-like set literal
+macro_rules! set {
+    (
+        $Type: ty {
+            $( $value: expr ),* $(,)?
+        }
+    ) => {{
+        #[allow(unused_mut)]
+        let mut set = <$Type>::new();
+        $( set.insert($value); )*
+        set
+    }}
+}
+
+/// `{key => value}` map literal
+macro_rules! map {
+    (
+        $Type: ty {
+            $( $key: expr => $value: expr ),* $(,)?
+        }
+    ) => {{
+        #[allow(unused_mut)]
+        let mut map = <$Type>::new();
+        $( map.insert($key, $value); )*
+        map
+    }}
+}
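
As a quick illustration, expansion sketches for both macros (assuming the `Revision` and collection types used elsewhere in this crate):

```rust
// set!(OrdSet<Revision> { 1, 2 }) expands to roughly:
let mut set = OrdSet::<Revision>::new();
set.insert(1);
set.insert(2);

// map!(HashMap<Revision, usize> { 1 => 1, 2 => 1 }) expands to roughly:
let mut map = HashMap::<Revision, usize>::new();
map.insert(1, 1);
map.insert(2, 1);
```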
+
+macro_rules! copy_source {
+    ($rev: expr, $path: expr, $overwritten: tt) => {
+        CopySource {
+            rev: $rev,
+            path: $path,
+            overwritten: set!(OrdSet<Revision> $overwritten),
+        }
+    };
+}
+
+macro_rules! compare_value {
+    (
+        $merge_revision: expr,
+        $merge_case_for_dest: ident,
+        ($min_rev: expr, $min_path: expr, $min_overwrite: tt),
+        ($maj_rev: expr, $maj_path: expr, $maj_overwrite: tt) $(,)?
+    ) => {
+        compare_value(
+            $merge_revision,
+            || $merge_case_for_dest,
+            &copy_source!($min_rev, $min_path, $min_overwrite),
+            &copy_source!($maj_rev, $maj_path, $maj_overwrite),
+        )
+    };
+}
+
+macro_rules! tokenized_path_copies {
+    (
+        $path_map: ident, {$(
+            $dest: expr => (
+                $src_rev: expr,
+                $src_path: expr,
+                $src_overwrite: tt
+            )
+        ),*}
+        $(,)*
+    ) => {
+        map!(InternalPathCopies {$(
+            $path_map.tokenize(HgPath::new($dest)) =>
+            copy_source!(
+                $src_rev,
+                Option::map($src_path, |p: &str| {
+                    $path_map.tokenize(HgPath::new(p))
+                }),
+                $src_overwrite
+            )
+        )*})
+    }
+}
+
+macro_rules! merge_case_callback {
+    (
+        $( $merge_path: expr => $merge_case: ident ),*
+        $(,)?
+    ) => {
+        #[allow(unused)]
+        |merge_path| -> MergeCase {
+            $(
+                if (merge_path == HgPath::new($merge_path)) {
+                    return $merge_case
+                }
+            )*
+            MergeCase::Normal
+        }
+    };
+}
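
For example, `merge_case_callback!("foo" => Merged)` produces a closure equivalent to:

```rust
let get_merge_case = |merge_path: &HgPath| -> MergeCase {
    if merge_path == HgPath::new("foo") {
        return Merged;
    }
    MergeCase::Normal
};
```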
+
+macro_rules! merge_copies_dict {
+    (
+        $current_merge: expr,
+        $minor_copies: tt,
+        $major_copies: tt,
+        $get_merge_case: tt $(,)?
+    ) => {
+        {
+            #[allow(unused_mut)]
+            let mut map = TwoWayPathMap::default();
+            let minor = tokenized_path_copies!(map, $minor_copies);
+            let major = tokenized_path_copies!(map, $major_copies);
+            merge_copies_dict(
+                &map, $current_merge, minor, major,
+                merge_case_callback! $get_merge_case,
+            )
+            .into_iter()
+            .map(|(token, source)| {
+                (
+                    map.untokenize(token).to_string(),
+                    (
+                        source.rev,
+                        source.path.map(|t| map.untokenize(t).to_string()),
+                        source.overwritten.into_iter().collect(),
+                    ),
+                )
+            })
+            .collect::<OrdMap<_, _>>()
+        }
+    };
+}
+
+macro_rules! internal_path_copies {
+    (
+        $(
+            $dest: expr => (
+                $src_rev: expr,
+                $src_path: expr,
+                $src_overwrite: tt $(,)?
+            )
+        ),*
+        $(,)*
+    ) => {
+        map!(OrdMap<_, _> {$(
+            String::from($dest) => (
+                $src_rev,
+                $src_path,
+                set!(OrdSet<Revision> $src_overwrite)
+            )
+        ),*})
+    };
+}
+
+macro_rules! combine_changeset_copies {
+    (
+        $children_count: tt,
+        [
+            $(
+                {
+                    rev: $rev: expr,
+                    p1: $p1: expr,
+                    p2: $p2: expr,
+                    actions: [
+                        $(
+                            $Action: ident($( $action_path: expr ),+)
+                        ),*
+                        $(,)?
+                    ],
+                    merge_cases: $merge: tt
+                    $(,)?
+                }
+            ),*
+            $(,)?
+        ],
+        $target_rev: expr $(,)*
+    ) => {{
+        let count = map!(HashMap<Revision, usize> $children_count);
+        let mut combine_changeset_copies = CombineChangesetCopies::new(count);
+        $(
+            let actions = vec![$(
+                $Action($( HgPath::new($action_path) ),*)
+            ),*];
+            combine_changeset_copies.add_revision_inner(
+                $rev, $p1, $p2, actions.into_iter(),
+                merge_case_callback! $merge
+            );
+        )*
+        combine_changeset_copies.finish($target_rev)
+    }};
+}
+
+macro_rules! path_copies {
+    (
+        $( $expected_destination: expr => $expected_source: expr ),* $(,)?
+    ) => {
+        map!(PathCopies {$(
+            HgPath::new($expected_destination).to_owned()
+                => HgPath::new($expected_source).to_owned(),
+        ),*})
+    };
+}
--- a/rust/hg-core/src/dirstate.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/dirstate.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -5,7 +5,10 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::{utils::hg_path::HgPathBuf, DirstateParseError, FastHashMap};
+use crate::errors::HgError;
+use crate::revlog::Node;
+use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use bytes_cast::{unaligned, BytesCast};
 use std::collections::hash_map;
 use std::convert::TryFrom;
 
@@ -16,10 +19,11 @@
 pub mod parsers;
 pub mod status;
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Clone, BytesCast)]
+#[repr(C)]
 pub struct DirstateParents {
-    pub p1: [u8; 20],
-    pub p2: [u8; 20],
+    pub p1: Node,
+    pub p2: Node,
 }
 
 /// The C implementation uses all signed types. This will be an issue
@@ -33,6 +37,16 @@
     pub size: i32,
 }
 
+#[derive(BytesCast)]
+#[repr(C)]
+struct RawEntry {
+    state: u8,
+    mode: unaligned::I32Be,
+    size: unaligned::I32Be,
+    mtime: unaligned::I32Be,
+    length: unaligned::I32Be,
+}
+
 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
 /// other parent. This allows revert to pick the right status back during a
 /// merge.
@@ -60,7 +74,7 @@
 }
 
 impl TryFrom<u8> for EntryState {
-    type Error = DirstateParseError;
+    type Error = HgError;
 
     fn try_from(value: u8) -> Result<Self, Self::Error> {
         match value {
@@ -69,8 +83,8 @@
             b'r' => Ok(EntryState::Removed),
             b'm' => Ok(EntryState::Merged),
             b'?' => Ok(EntryState::Unknown),
-            _ => Err(DirstateParseError::CorruptedEntry(format!(
-                "Incorrect entry state {}",
+            _ => Err(HgError::CorruptedRepository(format!(
+                "Incorrect dirstate entry state {}",
                 value
             ))),
         }
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -5,7 +5,8 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::revlog::node::NULL_NODE_ID;
+use crate::errors::HgError;
+use crate::revlog::node::NULL_NODE;
 use crate::{
     dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
     pack_dirstate, parse_dirstate,
@@ -14,7 +15,7 @@
         hg_path::{HgPath, HgPathBuf},
     },
     CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
-    DirstateParents, DirstateParseError, FastHashMap, StateMap,
+    DirstateParents, FastHashMap, StateMap,
 };
 use micro_timer::timed;
 use std::collections::HashSet;
@@ -72,8 +73,8 @@
         self.non_normal_set = None;
         self.other_parent_set = None;
         self.set_parents(&DirstateParents {
-            p1: NULL_NODE_ID,
-            p2: NULL_NODE_ID,
+            p1: NULL_NODE,
+            p2: NULL_NODE,
         })
     }
 
@@ -366,11 +367,13 @@
             };
         } else if file_contents.is_empty() {
             parents = DirstateParents {
-                p1: NULL_NODE_ID,
-                p2: NULL_NODE_ID,
+                p1: NULL_NODE,
+                p2: NULL_NODE,
             };
         } else {
-            return Err(DirstateError::Parse(DirstateParseError::Damaged));
+            return Err(
+                HgError::corrupted("Dirstate appears to be damaged").into()
+            );
         }
 
         self.parents = Some(parents);
@@ -383,10 +386,10 @@
     }
 
     #[timed]
-    pub fn read(
+    pub fn read<'a>(
         &mut self,
-        file_contents: &[u8],
-    ) -> Result<Option<DirstateParents>, DirstateError> {
+        file_contents: &'a [u8],
+    ) -> Result<Option<&'a DirstateParents>, DirstateError> {
         if file_contents.is_empty() {
             return Ok(None);
         }
--- a/rust/hg-core/src/dirstate/parsers.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/dirstate/parsers.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -3,15 +3,16 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+use crate::errors::HgError;
 use crate::utils::hg_path::HgPath;
 use crate::{
-    dirstate::{CopyMap, EntryState, StateMap},
-    DirstateEntry, DirstatePackError, DirstateParents, DirstateParseError,
+    dirstate::{CopyMap, EntryState, RawEntry, StateMap},
+    DirstateEntry, DirstateParents,
 };
-use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
+use byteorder::{BigEndian, WriteBytesExt};
+use bytes_cast::BytesCast;
 use micro_timer::timed;
 use std::convert::{TryFrom, TryInto};
-use std::io::Cursor;
 use std::time::Duration;
 
 /// Parents are stored in the dirstate as byte hashes.
@@ -20,65 +21,53 @@
 const MIN_ENTRY_SIZE: usize = 17;
 
 type ParseResult<'a> = (
-    DirstateParents,
+    &'a DirstateParents,
     Vec<(&'a HgPath, DirstateEntry)>,
     Vec<(&'a HgPath, &'a HgPath)>,
 );
 
-#[timed]
-pub fn parse_dirstate(
+pub fn parse_dirstate_parents(
     contents: &[u8],
-) -> Result<ParseResult, DirstateParseError> {
-    if contents.len() < PARENT_SIZE * 2 {
-        return Err(DirstateParseError::TooLittleData);
-    }
-    let mut copies = vec![];
-    let mut entries = vec![];
+) -> Result<&DirstateParents, HgError> {
+    let (parents, _rest) = DirstateParents::from_bytes(contents)
+        .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
+    Ok(parents)
+}
 
-    let mut curr_pos = PARENT_SIZE * 2;
-    let parents = DirstateParents {
-        p1: contents[..PARENT_SIZE].try_into().unwrap(),
-        p2: contents[PARENT_SIZE..curr_pos].try_into().unwrap(),
-    };
+#[timed]
+pub fn parse_dirstate(mut contents: &[u8]) -> Result<ParseResult, HgError> {
+    let mut copies = Vec::new();
+    let mut entries = Vec::new();
 
-    while curr_pos < contents.len() {
-        if curr_pos + MIN_ENTRY_SIZE > contents.len() {
-            return Err(DirstateParseError::Overflow);
-        }
-        let entry_bytes = &contents[curr_pos..];
+    let (parents, rest) = DirstateParents::from_bytes(contents)
+        .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
+    contents = rest;
+    while !contents.is_empty() {
+        let (raw_entry, rest) = RawEntry::from_bytes(contents)
+            .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
 
-        let mut cursor = Cursor::new(entry_bytes);
-        let state = EntryState::try_from(cursor.read_u8()?)?;
-        let mode = cursor.read_i32::<BigEndian>()?;
-        let size = cursor.read_i32::<BigEndian>()?;
-        let mtime = cursor.read_i32::<BigEndian>()?;
-        let path_len = cursor.read_i32::<BigEndian>()? as usize;
+        let entry = DirstateEntry {
+            state: EntryState::try_from(raw_entry.state)?,
+            mode: raw_entry.mode.get(),
+            mtime: raw_entry.mtime.get(),
+            size: raw_entry.size.get(),
+        };
+        let (paths, rest) =
+            u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
+                .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
 
-        if path_len > contents.len() - curr_pos {
-            return Err(DirstateParseError::Overflow);
+        // `paths` is either a single path, or two paths separated by a NULL
+        // byte
+        let mut iter = paths.splitn(2, |&byte| byte == b'\0');
+        let path = HgPath::new(
+            iter.next().expect("splitn always yields at least one item"),
+        );
+        if let Some(copy_source) = iter.next() {
+            copies.push((path, HgPath::new(copy_source)));
         }
 
-        // Slice instead of allocating a Vec needed for `read_exact`
-        let path = &entry_bytes[MIN_ENTRY_SIZE..MIN_ENTRY_SIZE + (path_len)];
-
-        let (path, copy) = match memchr::memchr(0, path) {
-            None => (path, None),
-            Some(i) => (&path[..i], Some(&path[(i + 1)..])),
-        };
-
-        if let Some(copy_path) = copy {
-            copies.push((HgPath::new(path), HgPath::new(copy_path)));
-        };
-        entries.push((
-            HgPath::new(path),
-            DirstateEntry {
-                state,
-                mode,
-                size,
-                mtime,
-            },
-        ));
-        curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len);
+        entries.push((path, entry));
+        contents = rest;
     }
     Ok((parents, entries, copies))
 }
@@ -90,7 +79,7 @@
     copy_map: &CopyMap,
     parents: DirstateParents,
     now: Duration,
-) -> Result<Vec<u8>, DirstatePackError> {
+) -> Result<Vec<u8>, HgError> {
     // TODO move away from i32 before 2038.
     let now: i32 = now.as_secs().try_into().expect("time overflow");
 
@@ -108,8 +97,8 @@
 
     let mut packed = Vec::with_capacity(expected_size);
 
-    packed.extend(&parents.p1);
-    packed.extend(&parents.p2);
+    packed.extend(parents.p1.as_bytes());
+    packed.extend(parents.p2.as_bytes());
 
     for (filename, entry) in state_map.iter_mut() {
         let new_filename = filename.to_owned();
@@ -136,16 +125,23 @@
             new_filename.extend(copy.bytes());
         }
 
-        packed.write_u8(entry.state.into())?;
-        packed.write_i32::<BigEndian>(entry.mode)?;
-        packed.write_i32::<BigEndian>(entry.size)?;
-        packed.write_i32::<BigEndian>(new_mtime)?;
-        packed.write_i32::<BigEndian>(new_filename.len() as i32)?;
+        // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
+        packed.write_u8(entry.state.into()).unwrap();
+        packed.write_i32::<BigEndian>(entry.mode).unwrap();
+        packed.write_i32::<BigEndian>(entry.size).unwrap();
+        packed.write_i32::<BigEndian>(new_mtime).unwrap();
+        packed
+            .write_i32::<BigEndian>(new_filename.len() as i32)
+            .unwrap();
         packed.extend(new_filename)
     }
 
     if packed.len() != expected_size {
-        return Err(DirstatePackError::BadSize(expected_size, packed.len()));
+        return Err(HgError::CorruptedRepository(format!(
+            "bad dirstate size: {} != {}",
+            expected_size,
+            packed.len()
+        )));
     }
 
     Ok(packed)
@@ -235,8 +231,8 @@
         let mut state_map = StateMap::default();
         let copymap = FastHashMap::default();
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let expected = b"1234567891011121314100000000000000000000".to_vec();
@@ -266,8 +262,8 @@
 
         let copymap = FastHashMap::default();
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let expected = [
@@ -306,8 +302,8 @@
             HgPathBuf::from_bytes(b"copyname"),
         );
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let expected = [
@@ -346,8 +342,8 @@
             HgPathBuf::from_bytes(b"copyname"),
         );
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let result =
@@ -366,7 +362,7 @@
             .collect();
 
         assert_eq!(
-            (parents, state_map, copymap),
+            (&parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
         )
     }
@@ -424,8 +420,8 @@
             HgPathBuf::from_bytes(b"copyname2"),
         );
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let result =
@@ -444,7 +440,7 @@
             .collect();
 
         assert_eq!(
-            (parents, state_map, copymap),
+            (&parents, state_map, copymap),
             (new_parents, new_state_map, new_copy_map)
         )
     }
@@ -470,8 +466,8 @@
             HgPathBuf::from_bytes(b"copyname"),
         );
         let parents = DirstateParents {
-            p1: *b"12345678910111213141",
-            p2: *b"00000000000000000000",
+            p1: b"12345678910111213141".into(),
+            p2: b"00000000000000000000".into(),
         };
         let now = Duration::new(15000000, 0);
         let result =
@@ -491,7 +487,7 @@
 
         assert_eq!(
             (
-                parents,
+                &parents,
                 [(
                     HgPathBuf::from_bytes(b"f1"),
                     DirstateEntry {
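
Each dirstate-v1 entry read and written above is a 17-byte fixed prefix (`MIN_ENTRY_SIZE`) followed by `length` bytes of path data. A hypothetical helper packing a single entry under that layout, mirroring the writes in `pack_dirstate`:

```rust
use byteorder::{BigEndian, WriteBytesExt};

// `path` is either "path" or "path\0copysource".
fn pack_one_entry(
    state: u8,
    mode: i32,
    size: i32,
    mtime: i32,
    path: &[u8],
) -> Vec<u8> {
    let mut packed = Vec::with_capacity(17 + path.len());
    // Writing into a Vec<u8> cannot fail, hence the unwraps.
    packed.write_u8(state).unwrap();
    packed.write_i32::<BigEndian>(mode).unwrap();
    packed.write_i32::<BigEndian>(size).unwrap();
    packed.write_i32::<BigEndian>(mtime).unwrap();
    packed.write_i32::<BigEndian>(path.len() as i32).unwrap();
    packed.extend(path);
    packed
}
```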
--- a/rust/hg-core/src/dirstate/status.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/dirstate/status.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -33,6 +33,7 @@
 use std::{
     borrow::Cow,
     collections::HashSet,
+    fmt,
     fs::{read_dir, DirEntry},
     io::ErrorKind,
     ops::Deref,
@@ -51,17 +52,16 @@
     Unknown,
 }
 
-impl ToString for BadType {
-    fn to_string(&self) -> String {
-        match self {
+impl fmt::Display for BadType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(match self {
             BadType::CharacterDevice => "character device",
             BadType::BlockDevice => "block device",
             BadType::FIFO => "fifo",
             BadType::Socket => "socket",
             BadType::Directory => "directory",
             BadType::Unknown => "unknown",
-        }
-        .to_string()
+        })
     }
 }
 
@@ -184,7 +184,13 @@
                 || other_parent
                 || copy_map.contains_key(filename.as_ref())
             {
-                Dispatch::Modified
+                if metadata.is_symlink() && size_changed {
+                    // issue6456: the reported size may be longer due to
+                    // ext4 fscrypt encryption. TODO: maybe only do it on ext4?
+                    Dispatch::Unsure
+                } else {
+                    Dispatch::Modified
+                }
             } else if mod_compare(mtime, st_mtime as i32)
                 || st_mtime == options.last_normal_time
             {
@@ -265,7 +271,7 @@
     pub traversed: Vec<HgPathBuf>,
 }
 
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
 pub enum StatusError {
     /// Generic IO error
     IO(std::io::Error),
@@ -277,28 +283,12 @@
 
 pub type StatusResult<T> = Result<T, StatusError>;
 
-impl From<PatternError> for StatusError {
-    fn from(e: PatternError) -> Self {
-        StatusError::Pattern(e)
-    }
-}
-impl From<HgPathError> for StatusError {
-    fn from(e: HgPathError) -> Self {
-        StatusError::Path(e)
-    }
-}
-impl From<std::io::Error> for StatusError {
-    fn from(e: std::io::Error) -> Self {
-        StatusError::IO(e)
-    }
-}
-
-impl ToString for StatusError {
-    fn to_string(&self) -> String {
+impl fmt::Display for StatusError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
-            StatusError::IO(e) => e.to_string(),
-            StatusError::Path(e) => e.to_string(),
-            StatusError::Pattern(e) => e.to_string(),
+            StatusError::IO(error) => error.fmt(f),
+            StatusError::Path(error) => error.fmt(f),
+            StatusError::Pattern(error) => error.fmt(f),
         }
     }
 }
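
With `derive_more::From`, the hand-written `From` impls deleted above are generated automatically, so `?` conversions keep working. A sketch with a hypothetical function:

```rust
use std::path::Path;

fn count_entries(path: &Path) -> Result<usize, StatusError> {
    // std::io::Error converts into StatusError::IO via the derived From.
    let entries = std::fs::read_dir(path)?;
    Ok(entries.count())
}
```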
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/errors.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,201 @@
+use crate::config::ConfigValueParseError;
+use std::fmt;
+
+/// Common error cases that can happen in many different APIs
+#[derive(Debug, derive_more::From)]
+pub enum HgError {
+    IoError {
+        error: std::io::Error,
+        context: IoErrorContext,
+    },
+
+    /// A file under `.hg/` normally only written by Mercurial is not in the
+    /// expected format. This indicates a bug in Mercurial, filesystem
+    /// corruption, or hardware failure.
+    ///
+    /// The given string is a short explanation for users, not intended to be
+    /// machine-readable.
+    CorruptedRepository(String),
+
+    /// The repository or requested operation involves a feature not
+    /// supported by the Rust implementation. Falling back to the Python
+    /// implementation may or may not work.
+    ///
+    /// The given string is a short explanation for users, not intended to be
+    /// machine-readable.
+    UnsupportedFeature(String),
+
+    /// Operation cannot proceed for some other reason.
+    ///
+    /// The given string is a short explanation for users, not intended to be
+    /// machine-readable.
+    Abort(String),
+
+    /// A configuration value is not in the expected syntax.
+    ///
+    /// These errors can happen in many places in the code because values are
+    /// parsed lazily as the file-level parser does not know the expected type
+    /// and syntax of each value.
+    #[from]
+    ConfigValueParseError(ConfigValueParseError),
+}
+
+/// Details about where an I/O error happened
+#[derive(Debug)]
+pub enum IoErrorContext {
+    ReadingFile(std::path::PathBuf),
+    WritingFile(std::path::PathBuf),
+    RemovingFile(std::path::PathBuf),
+    RenamingFile {
+        from: std::path::PathBuf,
+        to: std::path::PathBuf,
+    },
+    /// `std::fs::canonicalize`
+    CanonicalizingPath(std::path::PathBuf),
+    /// `std::env::current_dir`
+    CurrentDir,
+    /// `std::env::current_exe`
+    CurrentExe,
+}
+
+impl HgError {
+    pub fn corrupted(explanation: impl Into<String>) -> Self {
+        // TODO: capture a backtrace here and keep it in the error value
+        // to aid debugging?
+        // https://doc.rust-lang.org/std/backtrace/struct.Backtrace.html
+        HgError::CorruptedRepository(explanation.into())
+    }
+
+    pub fn unsupported(explanation: impl Into<String>) -> Self {
+        HgError::UnsupportedFeature(explanation.into())
+    }
+    pub fn abort(explanation: impl Into<String>) -> Self {
+        HgError::Abort(explanation.into())
+    }
+}
+
+// TODO: use `DisplayBytes` instead to show non-Unicode filenames losslessly?
+impl fmt::Display for HgError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            HgError::Abort(explanation) => write!(f, "{}", explanation),
+            HgError::IoError { error, context } => {
+                write!(f, "abort: {}: {}", context, error)
+            }
+            HgError::CorruptedRepository(explanation) => {
+                write!(f, "abort: {}", explanation)
+            }
+            HgError::UnsupportedFeature(explanation) => {
+                write!(f, "unsupported feature: {}", explanation)
+            }
+            HgError::ConfigValueParseError(ConfigValueParseError {
+                origin: _,
+                line: _,
+                section,
+                item,
+                value,
+                expected_type,
+            }) => {
+                // TODO: add origin and line number information, here and in
+                // corresponding python code
+                write!(
+                    f,
+                    "config error: {}.{} is not a {} ('{}')",
+                    String::from_utf8_lossy(section),
+                    String::from_utf8_lossy(item),
+                    expected_type,
+                    String::from_utf8_lossy(value)
+                )
+            }
+        }
+    }
+}
+
+// TODO: use `DisplayBytes` instead to show non-Unicode filenames losslessly?
+impl fmt::Display for IoErrorContext {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            IoErrorContext::ReadingFile(path) => {
+                write!(f, "when reading {}", path.display())
+            }
+            IoErrorContext::WritingFile(path) => {
+                write!(f, "when writing {}", path.display())
+            }
+            IoErrorContext::RemovingFile(path) => {
+                write!(f, "when removing {}", path.display())
+            }
+            IoErrorContext::RenamingFile { from, to } => write!(
+                f,
+                "when renaming {} to {}",
+                from.display(),
+                to.display()
+            ),
+            IoErrorContext::CanonicalizingPath(path) => {
+                write!(f, "when canonicalizing {}", path.display())
+            }
+            IoErrorContext::CurrentDir => {
+                write!(f, "error getting current working directory")
+            }
+            IoErrorContext::CurrentExe => {
+                write!(f, "error getting current executable")
+            }
+        }
+    }
+}
+
+pub trait IoResultExt<T> {
+    /// Annotate a possible I/O error as related to a reading a file at the
+    /// given path.
+    ///
+    /// This allows printing something like “File not found when reading
+    /// example.txt” instead of just “File not found”.
+    ///
+    /// Converts a `Result` with `std::io::Error` into one with `HgError`.
+    fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError>;
+
+    fn with_context(
+        self,
+        context: impl FnOnce() -> IoErrorContext,
+    ) -> Result<T, HgError>;
+}
+
+impl<T> IoResultExt<T> for std::io::Result<T> {
+    fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError> {
+        self.with_context(|| IoErrorContext::ReadingFile(path.to_owned()))
+    }
+
+    fn with_context(
+        self,
+        context: impl FnOnce() -> IoErrorContext,
+    ) -> Result<T, HgError> {
+        self.map_err(|error| HgError::IoError {
+            error,
+            context: context(),
+        })
+    }
+}
+
+pub trait HgResultExt<T> {
+    /// Handle missing files separately from other I/O error cases.
+    ///
+    /// Wraps the `Ok` type in an `Option`:
+    ///
+    /// * `Ok(x)` becomes `Ok(Some(x))`
+    /// * An I/O "not found" error becomes `Ok(None)`
+    /// * Other errors are unchanged
+    fn io_not_found_as_none(self) -> Result<Option<T>, HgError>;
+}
+
+impl<T> HgResultExt<T> for Result<T, HgError> {
+    fn io_not_found_as_none(self) -> Result<Option<T>, HgError> {
+        match self {
+            Ok(x) => Ok(Some(x)),
+            Err(HgError::IoError { error, .. })
+                if error.kind() == std::io::ErrorKind::NotFound =>
+            {
+                Ok(None)
+            }
+            Err(other_error) => Err(other_error),
+        }
+    }
+}
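
The two extension traits are designed to compose at the call site. A minimal usage sketch (hypothetical function name):

```rust
use std::path::Path;

fn read_optional_file(path: &Path) -> Result<Option<Vec<u8>>, HgError> {
    std::fs::read(path)
        .when_reading_file(path) // io::Error -> HgError, with context
        .io_not_found_as_none() // missing file -> Ok(None)
}
```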
--- a/rust/hg-core/src/lib.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/lib.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -3,8 +3,10 @@
 //
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
+
 mod ancestors;
 pub mod dagops;
+pub mod errors;
 pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
 mod dirstate;
 pub mod discovery;
@@ -27,23 +29,18 @@
 pub mod revlog;
 pub use revlog::*;
 pub mod config;
+pub mod logging;
 pub mod operations;
+pub mod revset;
 pub mod utils;
 
-// Remove this to see (potential) non-artificial compile failures. MacOS
-// *should* compile, but fail to compile tests for example as of 2020-03-06
-#[cfg(not(target_os = "linux"))]
-compile_error!(
-    "`hg-core` has only been tested on Linux and will most \
-     likely not behave correctly on other platforms."
-);
-
 use crate::utils::hg_path::{HgPathBuf, HgPathError};
 pub use filepatterns::{
     parse_pattern_syntax, read_pattern_file, IgnorePattern,
     PatternFileWarning, PatternSyntax,
 };
 use std::collections::HashMap;
+use std::fmt;
 use twox_hash::RandomXxHashBuilder64;
 
 /// This is a contract between the `micro-timer` crate and us, to expose
@@ -57,45 +54,6 @@
 /// write access to your repository, you have other issues.
 pub type FastHashMap<K, V> = HashMap<K, V, RandomXxHashBuilder64>;
 
-#[derive(Clone, Debug, PartialEq)]
-pub enum DirstateParseError {
-    TooLittleData,
-    Overflow,
-    // TODO refactor to use bytes instead of String
-    CorruptedEntry(String),
-    Damaged,
-}
-
-impl From<std::io::Error> for DirstateParseError {
-    fn from(e: std::io::Error) -> Self {
-        DirstateParseError::CorruptedEntry(e.to_string())
-    }
-}
-
-impl ToString for DirstateParseError {
-    fn to_string(&self) -> String {
-        use crate::DirstateParseError::*;
-        match self {
-            TooLittleData => "Too little data for dirstate.".to_string(),
-            Overflow => "Overflow in dirstate.".to_string(),
-            CorruptedEntry(e) => format!("Corrupted entry: {:?}.", e),
-            Damaged => "Dirstate appears to be damaged.".to_string(),
-        }
-    }
-}
-
-#[derive(Debug, PartialEq)]
-pub enum DirstatePackError {
-    CorruptedEntry(String),
-    CorruptedParent,
-    BadSize(usize, usize),
-}
-
-impl From<std::io::Error> for DirstatePackError {
-    fn from(e: std::io::Error) -> Self {
-        DirstatePackError::CorruptedEntry(e.to_string())
-    }
-}
 #[derive(Debug, PartialEq)]
 pub enum DirstateMapError {
     PathNotFound(HgPathBuf),
@@ -103,94 +61,61 @@
     InvalidPath(HgPathError),
 }
 
-impl ToString for DirstateMapError {
-    fn to_string(&self) -> String {
+impl fmt::Display for DirstateMapError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             DirstateMapError::PathNotFound(_) => {
-                "expected a value, found none".to_string()
+                f.write_str("expected a value, found none")
             }
-            DirstateMapError::EmptyPath => "Overflow in dirstate.".to_string(),
-            DirstateMapError::InvalidPath(e) => e.to_string(),
+            DirstateMapError::EmptyPath => {
+                f.write_str("Overflow in dirstate.")
+            }
+            DirstateMapError::InvalidPath(path_error) => path_error.fmt(f),
         }
     }
 }
 
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
 pub enum DirstateError {
-    Parse(DirstateParseError),
-    Pack(DirstatePackError),
     Map(DirstateMapError),
-    IO(std::io::Error),
+    Common(errors::HgError),
 }
 
-impl From<DirstateParseError> for DirstateError {
-    fn from(e: DirstateParseError) -> Self {
-        DirstateError::Parse(e)
-    }
-}
-
-impl From<DirstatePackError> for DirstateError {
-    fn from(e: DirstatePackError) -> Self {
-        DirstateError::Pack(e)
-    }
-}
-
-#[derive(Debug)]
+#[derive(Debug, derive_more::From)]
 pub enum PatternError {
+    #[from]
     Path(HgPathError),
     UnsupportedSyntax(String),
     UnsupportedSyntaxInFile(String, String, usize),
     TooLong(usize),
+    #[from]
     IO(std::io::Error),
     /// Needed a pattern that can be turned into a regex but got one that
     /// can't. This should only happen through programmer error.
     NonRegexPattern(IgnorePattern),
 }
 
-impl ToString for PatternError {
-    fn to_string(&self) -> String {
+impl fmt::Display for PatternError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             PatternError::UnsupportedSyntax(syntax) => {
-                format!("Unsupported syntax {}", syntax)
+                write!(f, "Unsupported syntax {}", syntax)
             }
             PatternError::UnsupportedSyntaxInFile(syntax, file_path, line) => {
-                format!(
+                write!(
+                    f,
                     "{}:{}: unsupported syntax {}",
                     file_path, line, syntax
                 )
             }
             PatternError::TooLong(size) => {
-                format!("matcher pattern is too long ({} bytes)", size)
+                write!(f, "matcher pattern is too long ({} bytes)", size)
             }
-            PatternError::IO(e) => e.to_string(),
-            PatternError::Path(e) => e.to_string(),
+            PatternError::IO(error) => error.fmt(f),
+            PatternError::Path(error) => error.fmt(f),
             PatternError::NonRegexPattern(pattern) => {
-                format!("'{:?}' cannot be turned into a regex", pattern)
+                write!(f, "'{:?}' cannot be turned into a regex", pattern)
             }
         }
     }
 }
-
-impl From<DirstateMapError> for DirstateError {
-    fn from(e: DirstateMapError) -> Self {
-        DirstateError::Map(e)
-    }
-}
-
-impl From<std::io::Error> for DirstateError {
-    fn from(e: std::io::Error) -> Self {
-        DirstateError::IO(e)
-    }
-}
-
-impl From<std::io::Error> for PatternError {
-    fn from(e: std::io::Error) -> Self {
-        PatternError::IO(e)
-    }
-}
-
-impl From<HgPathError> for PatternError {
-    fn from(e: HgPathError) -> Self {
-        PatternError::Path(e)
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/logging.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,101 @@
+use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
+use crate::repo::Vfs;
+use std::io::Write;
+
+/// A utility to append to a log file with the given name, and optionally
+/// rotate it after it reaches a certain maximum size.
+///
+/// Rotation works by renaming "example.log.1" to "example.log.2" and so on,
+/// up to the given maximum number of files, and then renaming "example.log"
+/// to "example.log.1".
+pub struct LogFile<'a> {
+    vfs: Vfs<'a>,
+    name: &'a str,
+    max_size: Option<u64>,
+    max_files: u32,
+}
+
+impl<'a> LogFile<'a> {
+    pub fn new(vfs: Vfs<'a>, name: &'a str) -> Self {
+        Self {
+            vfs,
+            name,
+            max_size: None,
+            max_files: 0,
+        }
+    }
+
+    /// Rotate before writing to a log file that was already larger than the
+    /// given size, in bytes. `None` disables rotation.
+    pub fn max_size(mut self, value: Option<u64>) -> Self {
+        self.max_size = value;
+        self
+    }
+
+    /// Keep this many rotated files `{name}.1` up to `{name}.{max}`, in
+    /// addition to the original `{name}` file.
+    pub fn max_files(mut self, value: u32) -> Self {
+        self.max_files = value;
+        self
+    }
+
+    /// Append the given `bytes` as-is to the log file, after rotating if
+    /// needed.
+    ///
+    /// No trailing newline is added. Make sure to include one in `bytes` if
+    /// desired.
+    pub fn write(&self, bytes: &[u8]) -> Result<(), HgError> {
+        let path = self.vfs.join(self.name);
+        let context = || IoErrorContext::WritingFile(path.clone());
+        let open = || {
+            std::fs::OpenOptions::new()
+                .create(true)
+                .append(true)
+                .open(&path)
+                .with_context(context)
+        };
+        let mut file = open()?;
+        if let Some(max_size) = self.max_size {
+            if file.metadata().with_context(context)?.len() >= max_size {
+                // For example with `max_files == 5`, the first iteration of
+                // this loop has `i == 4` and renames `{name}.4` to `{name}.5`.
+                // The last iteration renames `{name}.1` to
+                // `{name}.2`
+                for i in (1..self.max_files).rev() {
+                    self.vfs
+                        .rename(
+                            format!("{}.{}", self.name, i),
+                            format!("{}.{}", self.name, i + 1),
+                        )
+                        .io_not_found_as_none()?;
+                }
+                // Then rename `{name}` to `{name}.1`. This is the
+                // previously-opened `file`.
+                self.vfs
+                    .rename(self.name, format!("{}.1", self.name))
+                    .io_not_found_as_none()?;
+                // Finally, create a new `{name}` file and replace our `file`
+                // handle.
+                file = open()?;
+            }
+        }
+        file.write_all(bytes).with_context(context)?;
+        file.sync_all().with_context(context)
+    }
+}
+
+#[test]
+fn test_rotation() {
+    let temp = tempfile::tempdir().unwrap();
+    let vfs = Vfs { base: temp.path() };
+    let logger = LogFile::new(vfs, "log").max_size(Some(3)).max_files(2);
+    logger.write(b"one\n").unwrap();
+    logger.write(b"two\n").unwrap();
+    logger.write(b"3\n").unwrap();
+    logger.write(b"four\n").unwrap();
+    logger.write(b"five\n").unwrap();
+    assert_eq!(vfs.read("log").unwrap(), b"five\n");
+    assert_eq!(vfs.read("log.1").unwrap(), b"3\nfour\n");
+    assert_eq!(vfs.read("log.2").unwrap(), b"two\n");
+    assert!(vfs.read("log.3").io_not_found_as_none().unwrap().is_none());
+}
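
Typical builder-style usage, mirroring the test above; the file name and limits are illustrative:

```rust
// Rotate once the file reaches 100 kB, keeping two rotated copies
// (blackbox.log.1 and blackbox.log.2).
let logger = LogFile::new(vfs, "blackbox.log")
    .max_size(Some(100_000))
    .max_files(2);
logger.write(b"command started\n")?;
```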
--- a/rust/hg-core/src/operations/cat.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/operations/cat.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -5,7 +5,6 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use std::convert::From;
 use std::path::PathBuf;
 
 use crate::repo::Repo;
@@ -15,99 +14,59 @@
 use crate::revlog::revlog::Revlog;
 use crate::revlog::revlog::RevlogError;
 use crate::revlog::Node;
-use crate::revlog::NodePrefix;
-use crate::revlog::Revision;
 use crate::utils::files::get_path_from_bytes;
 use crate::utils::hg_path::{HgPath, HgPathBuf};
 
+pub struct CatOutput {
+    /// Whether any file in the manifest matched the paths given as CLI
+    /// arguments
+    pub found_any: bool,
+    /// The contents of matching files, in manifest order
+    pub concatenated: Vec<u8>,
+    /// Which of the CLI arguments did not match any manifest file
+    pub missing: Vec<HgPathBuf>,
+    /// The node ID that the given revset was resolved to
+    pub node: Node,
+}
+
 const METADATA_DELIMITER: [u8; 2] = [b'\x01', b'\n'];
 
-/// Kind of error encountered by `CatRev`
-#[derive(Debug)]
-pub enum CatRevErrorKind {
-    /// Error when reading a `revlog` file.
-    IoError(std::io::Error),
-    /// The revision has not been found.
-    InvalidRevision,
-    /// Found more than one revision whose ID match the requested prefix
-    AmbiguousPrefix,
-    /// A `revlog` file is corrupted.
-    CorruptedRevlog,
-    /// The `revlog` format version is not supported.
-    UnsuportedRevlogVersion(u16),
-    /// The `revlog` data format is not supported.
-    UnknowRevlogDataFormat(u8),
-}
-
-/// A `CatRev` error
-#[derive(Debug)]
-pub struct CatRevError {
-    /// Kind of error encountered by `CatRev`
-    pub kind: CatRevErrorKind,
-}
-
-impl From<CatRevErrorKind> for CatRevError {
-    fn from(kind: CatRevErrorKind) -> Self {
-        CatRevError { kind }
-    }
-}
-
-impl From<RevlogError> for CatRevError {
-    fn from(err: RevlogError) -> Self {
-        match err {
-            RevlogError::IoError(err) => CatRevErrorKind::IoError(err),
-            RevlogError::UnsuportedVersion(version) => {
-                CatRevErrorKind::UnsuportedRevlogVersion(version)
-            }
-            RevlogError::InvalidRevision => CatRevErrorKind::InvalidRevision,
-            RevlogError::AmbiguousPrefix => CatRevErrorKind::AmbiguousPrefix,
-            RevlogError::Corrupted => CatRevErrorKind::CorruptedRevlog,
-            RevlogError::UnknowDataFormat(format) => {
-                CatRevErrorKind::UnknowRevlogDataFormat(format)
-            }
-        }
-        .into()
-    }
-}
-
-/// List files under Mercurial control at a given revision.
+/// Output the contents of the given files at the given revision
 ///
 /// * `root`: Repository root
 /// * `rev`: The revision to cat the files from.
 /// * `files`: The files to output.
-pub fn cat(
+pub fn cat<'a>(
     repo: &Repo,
-    rev: &str,
-    files: &[HgPathBuf],
-) -> Result<Vec<u8>, CatRevError> {
+    revset: &str,
+    files: &'a [HgPathBuf],
+) -> Result<CatOutput, RevlogError> {
+    let rev = crate::revset::resolve_single(revset, repo)?;
     let changelog = Changelog::open(repo)?;
     let manifest = Manifest::open(repo)?;
-
-    let changelog_entry = match rev.parse::<Revision>() {
-        Ok(rev) => changelog.get_rev(rev)?,
-        _ => {
-            let changelog_node = NodePrefix::from_hex(&rev)
-                .map_err(|_| CatRevErrorKind::InvalidRevision)?;
-            changelog.get_node(changelog_node.borrow())?
-        }
-    };
-    let manifest_node = Node::from_hex(&changelog_entry.manifest_node()?)
-        .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
-
-    let manifest_entry = manifest.get_node((&manifest_node).into())?;
+    let changelog_entry = changelog.get_rev(rev)?;
+    let node = *changelog
+        .node_from_rev(rev)
+        .expect("should succeed when changelog.get_rev did");
+    let manifest_node =
+        Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
+    let manifest_entry = manifest.get_node(manifest_node.into())?;
     let mut bytes = vec![];
+    let mut matched = vec![false; files.len()];
+    let mut found_any = false;
 
     for (manifest_file, node_bytes) in manifest_entry.files_with_nodes() {
-        for cat_file in files.iter() {
+        for (cat_file, is_matched) in files.iter().zip(&mut matched) {
             if cat_file.as_bytes() == manifest_file.as_bytes() {
+                *is_matched = true;
+                found_any = true;
                 let index_path = store_path(manifest_file, b".i");
                 let data_path = store_path(manifest_file, b".d");
 
                 let file_log =
                     Revlog::open(repo, &index_path, Some(&data_path))?;
-                let file_node = Node::from_hex(node_bytes)
-                    .map_err(|_| CatRevErrorKind::CorruptedRevlog)?;
-                let file_rev = file_log.get_node_rev((&file_node).into())?;
+                let file_node = Node::from_hex_for_repo(node_bytes)?;
+                let file_rev = file_log.get_node_rev(file_node.into())?;
                 let data = file_log.get_rev_data(file_rev)?;
                 if data.starts_with(&METADATA_DELIMITER) {
                     let end_delimiter_position = data
@@ -125,7 +84,18 @@
         }
     }
 
-    Ok(bytes)
+    let missing: Vec<_> = files
+        .iter()
+        .zip(&matched)
+        .filter(|pair| !*pair.1)
+        .map(|pair| pair.0.clone())
+        .collect();
+    Ok(CatOutput {
+        found_any,
+        concatenated: bytes,
+        missing,
+        node,
+    })
 }
 
 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
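
A sketch of how a caller might consume `CatOutput`; the surrounding `repo`, `files`, and the revset string are assumed:

```rust
use std::io::Write;

let output = cat(&repo, "0", &files)?;
std::io::stdout().write_all(&output.concatenated)?;
for path in &output.missing {
    // CLI arguments that matched no file in the manifest at that node.
    eprintln!("no such file in the requested revision: {:?}", path);
}
```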
--- a/rust/hg-core/src/operations/debugdata.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/operations/debugdata.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -7,8 +7,6 @@
 
 use crate::repo::Repo;
 use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefix;
-use crate::revlog::Revision;
 
 /// Kind of data to debug
 #[derive(Debug, Copy, Clone)]
@@ -17,86 +15,19 @@
     Manifest,
 }
 
-/// Kind of error encountered by DebugData
-#[derive(Debug)]
-pub enum DebugDataErrorKind {
-    /// Error when reading a `revlog` file.
-    IoError(std::io::Error),
-    /// The revision has not been found.
-    InvalidRevision,
-    /// Found more than one revision whose ID match the requested prefix
-    AmbiguousPrefix,
-    /// A `revlog` file is corrupted.
-    CorruptedRevlog,
-    /// The `revlog` format version is not supported.
-    UnsuportedRevlogVersion(u16),
-    /// The `revlog` data format is not supported.
-    UnknowRevlogDataFormat(u8),
-}
-
-/// A DebugData error
-#[derive(Debug)]
-pub struct DebugDataError {
-    /// Kind of error encountered by DebugData
-    pub kind: DebugDataErrorKind,
-}
-
-impl From<DebugDataErrorKind> for DebugDataError {
-    fn from(kind: DebugDataErrorKind) -> Self {
-        DebugDataError { kind }
-    }
-}
-
-impl From<std::io::Error> for DebugDataError {
-    fn from(err: std::io::Error) -> Self {
-        let kind = DebugDataErrorKind::IoError(err);
-        DebugDataError { kind }
-    }
-}
-
-impl From<RevlogError> for DebugDataError {
-    fn from(err: RevlogError) -> Self {
-        match err {
-            RevlogError::IoError(err) => DebugDataErrorKind::IoError(err),
-            RevlogError::UnsuportedVersion(version) => {
-                DebugDataErrorKind::UnsuportedRevlogVersion(version)
-            }
-            RevlogError::InvalidRevision => {
-                DebugDataErrorKind::InvalidRevision
-            }
-            RevlogError::AmbiguousPrefix => {
-                DebugDataErrorKind::AmbiguousPrefix
-            }
-            RevlogError::Corrupted => DebugDataErrorKind::CorruptedRevlog,
-            RevlogError::UnknowDataFormat(format) => {
-                DebugDataErrorKind::UnknowRevlogDataFormat(format)
-            }
-        }
-        .into()
-    }
-}
-
 /// Dump the contents data of a revision.
 pub fn debug_data(
     repo: &Repo,
-    rev: &str,
+    revset: &str,
     kind: DebugDataKind,
-) -> Result<Vec<u8>, DebugDataError> {
+) -> Result<Vec<u8>, RevlogError> {
     let index_file = match kind {
         DebugDataKind::Changelog => "00changelog.i",
         DebugDataKind::Manifest => "00manifest.i",
     };
     let revlog = Revlog::open(repo, index_file, None)?;
-
-    let data = match rev.parse::<Revision>() {
-        Ok(rev) => revlog.get_rev_data(rev)?,
-        _ => {
-            let node = NodePrefix::from_hex(&rev)
-                .map_err(|_| DebugDataErrorKind::InvalidRevision)?;
-            let rev = revlog.get_node_rev(node.borrow())?;
-            revlog.get_rev_data(rev)?
-        }
-    };
-
+    let rev =
+        crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?;
+    let data = revlog.get_rev_data(rev)?;
     Ok(data)
 }
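
Call-site sketch; the revision string is illustrative, and a hex node prefix works as well:

```rust
use std::io::Write;

// Dump the raw changelog entry for revision 0.
let data = debug_data(&repo, "0", DebugDataKind::Changelog)?;
std::io::stdout().write_all(&data)?;
```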
--- a/rust/hg-core/src/operations/find_root.rs	Sat Mar 13 02:09:23 2021 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-use std::fmt;
-use std::path::{Path, PathBuf};
-
-/// Kind of error encoutered by FindRoot
-#[derive(Debug)]
-pub enum FindRootErrorKind {
-    /// Root of the repository has not been found
-    /// Contains the current directory used by FindRoot
-    RootNotFound(PathBuf),
-    /// The current directory does not exists or permissions are insufficient
-    /// to get access to it
-    GetCurrentDirError(std::io::Error),
-}
-
-/// A FindRoot error
-#[derive(Debug)]
-pub struct FindRootError {
-    /// Kind of error encoutered by FindRoot
-    pub kind: FindRootErrorKind,
-}
-
-impl std::error::Error for FindRootError {}
-
-impl fmt::Display for FindRootError {
-    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        unimplemented!()
-    }
-}
-
-/// Find the root of the repository
-/// by searching for a .hg directory in the process’ current directory and its
-/// ancestors
-pub fn find_root() -> Result<PathBuf, FindRootError> {
-    let current_dir = std::env::current_dir().map_err(|e| FindRootError {
-        kind: FindRootErrorKind::GetCurrentDirError(e),
-    })?;
-    Ok(find_root_from_path(&current_dir)?.into())
-}
-
-/// Find the root of the repository
-/// by searching for a .hg directory in the given directory and its ancestors
-pub fn find_root_from_path(start: &Path) -> Result<&Path, FindRootError> {
-    if start.join(".hg").exists() {
-        return Ok(start);
-    }
-    for ancestor in start.ancestors() {
-        if ancestor.join(".hg").exists() {
-            return Ok(ancestor);
-        }
-    }
-    Err(FindRootError {
-        kind: FindRootErrorKind::RootNotFound(start.into()),
-    })
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use std::fs;
-    use tempfile;
-
-    #[test]
-    fn dot_hg_not_found() {
-        let tmp_dir = tempfile::tempdir().unwrap();
-        let path = tmp_dir.path();
-
-        let err = find_root_from_path(&path).unwrap_err();
-
-        // TODO do something better
-        assert!(match err {
-            FindRootError { kind } => match kind {
-                FindRootErrorKind::RootNotFound(p) => p == path.to_path_buf(),
-                _ => false,
-            },
-        })
-    }
-
-    #[test]
-    fn dot_hg_in_current_path() {
-        let tmp_dir = tempfile::tempdir().unwrap();
-        let root = tmp_dir.path();
-        fs::create_dir_all(root.join(".hg")).unwrap();
-
-        let result = find_root_from_path(&root).unwrap();
-
-        assert_eq!(result, root)
-    }
-
-    #[test]
-    fn dot_hg_in_parent() {
-        let tmp_dir = tempfile::tempdir().unwrap();
-        let root = tmp_dir.path();
-        fs::create_dir_all(root.join(".hg")).unwrap();
-
-        let directory = root.join("some/nested/directory");
-        let result = find_root_from_path(&directory).unwrap();
-
-        assert_eq!(result, root)
-    }
-} /* tests */
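
The root-finding logic deleted here lives on in `Repo::find` (see the `rust/hg-core/src/repo.rs` hunk further down). Worth noting: the deleted function's initial `start.join(".hg").exists()` check was redundant, because `Path::ancestors()` is inclusive and yields the starting path itself before its parents, as this standalone snippet shows:

```rust
use std::path::Path;

fn main() {
    let start = Path::new("/repo/some/nested/directory");
    let ancestors: Vec<&Path> = start.ancestors().collect();
    // `ancestors()` yields the path itself first, then each parent,
    // ending at the filesystem root.
    assert_eq!(ancestors[0], Path::new("/repo/some/nested/directory"));
    assert_eq!(ancestors[1], Path::new("/repo/some/nested"));
    assert_eq!(*ancestors.last().unwrap(), Path::new("/"));
}
```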
--- a/rust/hg-core/src/operations/list_tracked_files.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/operations/list_tracked_files.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -6,47 +6,15 @@
 // GNU General Public License version 2 or any later version.
 
 use crate::dirstate::parsers::parse_dirstate;
+use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::changelog::Changelog;
 use crate::revlog::manifest::{Manifest, ManifestEntry};
-use crate::revlog::node::{Node, NodePrefix};
+use crate::revlog::node::Node;
 use crate::revlog::revlog::RevlogError;
-use crate::revlog::Revision;
 use crate::utils::hg_path::HgPath;
-use crate::{DirstateParseError, EntryState};
+use crate::EntryState;
 use rayon::prelude::*;
-use std::convert::From;
-
-/// Kind of error encountered by `ListDirstateTrackedFiles`
-#[derive(Debug)]
-pub enum ListDirstateTrackedFilesErrorKind {
-    /// Error when reading the `dirstate` file
-    IoError(std::io::Error),
-    /// Error when parsing the `dirstate` file
-    ParseError(DirstateParseError),
-}
-
-/// A `ListDirstateTrackedFiles` error
-#[derive(Debug)]
-pub struct ListDirstateTrackedFilesError {
-    /// Kind of error encountered by `ListDirstateTrackedFiles`
-    pub kind: ListDirstateTrackedFilesErrorKind,
-}
-
-impl From<ListDirstateTrackedFilesErrorKind>
-    for ListDirstateTrackedFilesError
-{
-    fn from(kind: ListDirstateTrackedFilesErrorKind) -> Self {
-        ListDirstateTrackedFilesError { kind }
-    }
-}
-
-impl From<std::io::Error> for ListDirstateTrackedFilesError {
-    fn from(err: std::io::Error) -> Self {
-        let kind = ListDirstateTrackedFilesErrorKind::IoError(err);
-        ListDirstateTrackedFilesError { kind }
-    }
-}
 
 /// List files under Mercurial control in the working directory
 /// by reading the dirstate
@@ -56,16 +24,13 @@
 }
 
 impl Dirstate {
-    pub fn new(repo: &Repo) -> Result<Self, ListDirstateTrackedFilesError> {
+    pub fn new(repo: &Repo) -> Result<Self, HgError> {
         let content = repo.hg_vfs().read("dirstate")?;
         Ok(Self { content })
     }
 
-    pub fn tracked_files(
-        &self,
-    ) -> Result<Vec<&HgPath>, ListDirstateTrackedFilesError> {
-        let (_, entries, _) = parse_dirstate(&self.content)
-            .map_err(ListDirstateTrackedFilesErrorKind::ParseError)?;
+    pub fn tracked_files(&self) -> Result<Vec<&HgPath>, HgError> {
+        let (_, entries, _) = parse_dirstate(&self.content)?;
         let mut files: Vec<&HgPath> = entries
             .into_iter()
             .filter_map(|(path, entry)| match entry.state {
@@ -78,81 +43,18 @@
     }
 }
 
-/// Kind of error encountered by `ListRevTrackedFiles`
-#[derive(Debug)]
-pub enum ListRevTrackedFilesErrorKind {
-    /// Error when reading a `revlog` file.
-    IoError(std::io::Error),
-    /// The revision has not been found.
-    InvalidRevision,
-    /// Found more than one revision whose ID match the requested prefix
-    AmbiguousPrefix,
-    /// A `revlog` file is corrupted.
-    CorruptedRevlog,
-    /// The `revlog` format version is not supported.
-    UnsuportedRevlogVersion(u16),
-    /// The `revlog` data format is not supported.
-    UnknowRevlogDataFormat(u8),
-}
-
-/// A `ListRevTrackedFiles` error
-#[derive(Debug)]
-pub struct ListRevTrackedFilesError {
-    /// Kind of error encountered by `ListRevTrackedFiles`
-    pub kind: ListRevTrackedFilesErrorKind,
-}
-
-impl From<ListRevTrackedFilesErrorKind> for ListRevTrackedFilesError {
-    fn from(kind: ListRevTrackedFilesErrorKind) -> Self {
-        ListRevTrackedFilesError { kind }
-    }
-}
-
-impl From<RevlogError> for ListRevTrackedFilesError {
-    fn from(err: RevlogError) -> Self {
-        match err {
-            RevlogError::IoError(err) => {
-                ListRevTrackedFilesErrorKind::IoError(err)
-            }
-            RevlogError::UnsuportedVersion(version) => {
-                ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version)
-            }
-            RevlogError::InvalidRevision => {
-                ListRevTrackedFilesErrorKind::InvalidRevision
-            }
-            RevlogError::AmbiguousPrefix => {
-                ListRevTrackedFilesErrorKind::AmbiguousPrefix
-            }
-            RevlogError::Corrupted => {
-                ListRevTrackedFilesErrorKind::CorruptedRevlog
-            }
-            RevlogError::UnknowDataFormat(format) => {
-                ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format)
-            }
-        }
-        .into()
-    }
-}
-
 /// List files under Mercurial control at a given revision.
 pub fn list_rev_tracked_files(
     repo: &Repo,
-    rev: &str,
-) -> Result<FilesForRev, ListRevTrackedFilesError> {
+    revset: &str,
+) -> Result<FilesForRev, RevlogError> {
+    let rev = crate::revset::resolve_single(revset, repo)?;
     let changelog = Changelog::open(repo)?;
     let manifest = Manifest::open(repo)?;
-
-    let changelog_entry = match rev.parse::<Revision>() {
-        Ok(rev) => changelog.get_rev(rev)?,
-        _ => {
-            let changelog_node = NodePrefix::from_hex(&rev)
-                .or(Err(ListRevTrackedFilesErrorKind::InvalidRevision))?;
-            changelog.get_node(changelog_node.borrow())?
-        }
-    };
-    let manifest_node = Node::from_hex(&changelog_entry.manifest_node()?)
-        .or(Err(ListRevTrackedFilesErrorKind::CorruptedRevlog))?;
-    let manifest_entry = manifest.get_node((&manifest_node).into())?;
+    let changelog_entry = changelog.get_rev(rev)?;
+    let manifest_node =
+        Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
+    let manifest_entry = manifest.get_node(manifest_node.into())?;
     Ok(FilesForRev(manifest_entry))
 }
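
Earlier in this file's diff, `Dirstate::tracked_files` keeps every dirstate entry except those in the `Removed` state. A dependency-free sketch of that filtering idiom, using a simplified stand-in for hg-core's `EntryState` (the real enum has more variants):

```rust
// Simplified stand-in for hg-core's `EntryState`.
#[derive(Clone, Copy)]
enum EntryState {
    Normal,
    Added,
    Removed,
}

// Keep every path whose entry is not `Removed`, mirroring the
// `filter_map` in `Dirstate::tracked_files`.
fn tracked<'a>(entries: &[(&'a str, EntryState)]) -> Vec<&'a str> {
    entries
        .iter()
        .filter_map(|&(path, state)| match state {
            EntryState::Removed => None,
            _ => Some(path),
        })
        .collect()
}

fn main() {
    let entries = [
        ("a.txt", EntryState::Normal),
        ("b.txt", EntryState::Removed),
        ("c.txt", EntryState::Added),
    ];
    assert_eq!(tracked(&entries), ["a.txt", "c.txt"]);
}
```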
 
--- a/rust/hg-core/src/operations/mod.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/operations/mod.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -5,19 +5,8 @@
 mod cat;
 mod debugdata;
 mod dirstate_status;
-mod find_root;
 mod list_tracked_files;
-pub use cat::{cat, CatRevError, CatRevErrorKind};
-pub use debugdata::{
-    debug_data, DebugDataError, DebugDataErrorKind, DebugDataKind,
-};
-pub use find_root::{
-    find_root, find_root_from_path, FindRootError, FindRootErrorKind,
-};
-pub use list_tracked_files::{
-    list_rev_tracked_files, FilesForRev, ListRevTrackedFilesError,
-    ListRevTrackedFilesErrorKind,
-};
-pub use list_tracked_files::{
-    Dirstate, ListDirstateTrackedFilesError, ListDirstateTrackedFilesErrorKind,
-};
+pub use cat::{cat, CatOutput};
+pub use debugdata::{debug_data, DebugDataKind};
+pub use list_tracked_files::Dirstate;
+pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
--- a/rust/hg-core/src/repo.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/repo.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,6 +1,10 @@
-use crate::operations::{find_root, FindRootError};
+use crate::config::{Config, ConfigError, ConfigParseError};
+use crate::errors::{HgError, IoErrorContext, IoResultExt};
 use crate::requirements;
+use crate::utils::files::get_path_from_bytes;
+use crate::utils::SliceExt;
 use memmap::{Mmap, MmapOptions};
+use std::collections::HashSet;
 use std::path::{Path, PathBuf};
 
 /// A repository on disk
@@ -8,49 +12,202 @@
     working_directory: PathBuf,
     dot_hg: PathBuf,
     store: PathBuf,
+    requirements: HashSet<String>,
+    config: Config,
+}
+
+#[derive(Debug, derive_more::From)]
+pub enum RepoError {
+    NotFound {
+        at: PathBuf,
+    },
+    #[from]
+    ConfigParseError(ConfigParseError),
+    #[from]
+    Other(HgError),
+}
+
+impl From<ConfigError> for RepoError {
+    fn from(error: ConfigError) -> Self {
+        match error {
+            ConfigError::Parse(error) => error.into(),
+            ConfigError::Other(error) => error.into(),
+        }
+    }
 }
 
 /// Filesystem access abstraction for the contents of a given "base" directory
 #[derive(Clone, Copy)]
-pub(crate) struct Vfs<'a> {
-    base: &'a Path,
+pub struct Vfs<'a> {
+    pub(crate) base: &'a Path,
 }
 
 impl Repo {
-    /// Returns `None` if the given path doesn’t look like a repository
-    /// (doesn’t contain a `.hg` sub-directory).
-    pub fn for_path(root: impl Into<PathBuf>) -> Self {
-        let working_directory = root.into();
-        let dot_hg = working_directory.join(".hg");
-        Self {
-            store: dot_hg.join("store"),
-            dot_hg,
-            working_directory,
+    /// Find a repository, either at the given path (which must contain a `.hg`
+    /// sub-directory) or by searching the current directory and its
+    /// ancestors.
+    ///
+    /// A method with two very different "modes" like this is usually a code
+    /// smell; it would arguably be cleaner to make two methods instead. In
+    /// this case, however, an `Option` is what rhg sub-commands get from Clap
+    /// for the `-R` / `--repository` CLI argument, and having two methods
+    /// would just move that `if` to almost all callers.
+    pub fn find(
+        config: &Config,
+        explicit_path: Option<&Path>,
+    ) -> Result<Self, RepoError> {
+        if let Some(root) = explicit_path {
+            if root.join(".hg").is_dir() {
+                Self::new_at_path(root.to_owned(), config)
+            } else if root.is_file() {
+                Err(HgError::unsupported("bundle repository").into())
+            } else {
+                Err(RepoError::NotFound {
+                    at: root.to_owned(),
+                })
+            }
+        } else {
+            let current_directory = crate::utils::current_dir()?;
+            // ancestors() is inclusive: it first yields `current_directory`
+            // as-is.
+            for ancestor in current_directory.ancestors() {
+                if ancestor.join(".hg").is_dir() {
+                    return Self::new_at_path(ancestor.to_owned(), config);
+                }
+            }
+            Err(RepoError::NotFound {
+                at: current_directory,
+            })
         }
     }
 
-    pub fn find() -> Result<Self, FindRootError> {
-        find_root().map(Self::for_path)
-    }
+    /// To be called after checking that `.hg` is a sub-directory
+    fn new_at_path(
+        working_directory: PathBuf,
+        config: &Config,
+    ) -> Result<Self, RepoError> {
+        let dot_hg = working_directory.join(".hg");
+
+        let mut repo_config_files = Vec::new();
+        repo_config_files.push(dot_hg.join("hgrc"));
+        repo_config_files.push(dot_hg.join("hgrc-not-shared"));
+
+        let hg_vfs = Vfs { base: &dot_hg };
+        let mut reqs = requirements::load_if_exists(hg_vfs)?;
+        let relative =
+            reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
+        let shared =
+            reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
+
+        // From `mercurial/localrepo.py`:
+        //
+        // if .hg/requires contains the sharesafe requirement, it means
+        // there exists a `.hg/store/requires` too and we should read it
+        // NOTE: presence of SHARESAFE_REQUIREMENT implies that a store
+        // requirement is present. We never write SHARESAFE_REQUIREMENT for a
+        // repo if store is not present; refer to checkrequirementscompat()
+        // for that
+        //
+        // However, if SHARESAFE_REQUIREMENT is not present, it means that the
+        // repository was shared the old way. We check the share source
+        // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
+        // current repository needs to be reshared
+        let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
+
+        let store_path;
+        if !shared {
+            store_path = dot_hg.join("store");
+        } else {
+            let bytes = hg_vfs.read("sharedpath")?;
+            let mut shared_path =
+                get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
+            if relative {
+                shared_path = dot_hg.join(shared_path)
+            }
+            if !shared_path.is_dir() {
+                return Err(HgError::corrupted(format!(
+                    ".hg/sharedpath points to nonexistent directory {}",
+                    shared_path.display()
+                ))
+                .into());
+            }
+
+            store_path = shared_path.join("store");
 
-    pub fn check_requirements(
-        &self,
-    ) -> Result<(), requirements::RequirementsError> {
-        requirements::check(self)
+            let source_is_share_safe =
+                requirements::load(Vfs { base: &shared_path })?
+                    .contains(requirements::SHARESAFE_REQUIREMENT);
+
+            if share_safe && !source_is_share_safe {
+                return Err(match config
+                    .get(b"share", b"safe-mismatch.source-not-safe")
+                {
+                    Some(b"abort") | None => HgError::abort(
+                        "abort: share source does not support share-safe requirement\n\
+                        (see `hg help config.format.use-share-safe` for more information)",
+                    ),
+                    _ => HgError::unsupported("share-safe downgrade"),
+                }
+                .into());
+            } else if source_is_share_safe && !share_safe {
+                return Err(
+                    match config.get(b"share", b"safe-mismatch.source-safe") {
+                        Some(b"abort") | None => HgError::abort(
+                            "abort: version mismatch: source uses share-safe \
+                            functionality while the current share does not\n\
+                            (see `hg help config.format.use-share-safe` for more information)",
+                        ),
+                        _ => HgError::unsupported("share-safe upgrade"),
+                    }
+                    .into(),
+                );
+            }
+
+            if share_safe {
+                repo_config_files.insert(0, shared_path.join("hgrc"))
+            }
+        }
+        if share_safe {
+            reqs.extend(requirements::load(Vfs { base: &store_path })?);
+        }
+
+        let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
+            config.combine_with_repo(&repo_config_files)?
+        } else {
+            config.clone()
+        };
+
+        let repo = Self {
+            requirements: reqs,
+            working_directory,
+            store: store_path,
+            dot_hg,
+            config: repo_config,
+        };
+
+        requirements::check(&repo)?;
+
+        Ok(repo)
     }
 
     pub fn working_directory_path(&self) -> &Path {
         &self.working_directory
     }
 
+    pub fn requirements(&self) -> &HashSet<String> {
+        &self.requirements
+    }
+
+    pub fn config(&self) -> &Config {
+        &self.config
+    }
+
     /// For accessing repository files (in `.hg`), except for the store
     /// (`.hg/store`).
-    pub(crate) fn hg_vfs(&self) -> Vfs<'_> {
+    pub fn hg_vfs(&self) -> Vfs<'_> {
         Vfs { base: &self.dot_hg }
     }
 
     /// For accessing repository store files (in `.hg/store`)
-    pub(crate) fn store_vfs(&self) -> Vfs<'_> {
+    pub fn store_vfs(&self) -> Vfs<'_> {
         Vfs { base: &self.store }
     }
 
@@ -58,35 +215,55 @@
 
     // The underscore prefix silences the "never used" warning. Remove before
     // using.
-    pub(crate) fn _working_directory_vfs(&self) -> Vfs<'_> {
+    pub fn _working_directory_vfs(&self) -> Vfs<'_> {
         Vfs {
             base: &self.working_directory,
         }
     }
+
+    pub fn dirstate_parents(
+        &self,
+    ) -> Result<crate::dirstate::DirstateParents, HgError> {
+        let dirstate = self.hg_vfs().mmap_open("dirstate")?;
+        let parents =
+            crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?;
+        Ok(parents.clone())
+    }
 }
 
 impl Vfs<'_> {
-    pub(crate) fn read(
+    pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
+        self.base.join(relative_path)
+    }
+
+    pub fn read(
         &self,
         relative_path: impl AsRef<Path>,
-    ) -> std::io::Result<Vec<u8>> {
-        std::fs::read(self.base.join(relative_path))
+    ) -> Result<Vec<u8>, HgError> {
+        let path = self.join(relative_path);
+        std::fs::read(&path).when_reading_file(&path)
     }
 
-    pub(crate) fn open(
+    pub fn mmap_open(
         &self,
         relative_path: impl AsRef<Path>,
-    ) -> std::io::Result<std::fs::File> {
-        std::fs::File::open(self.base.join(relative_path))
+    ) -> Result<Mmap, HgError> {
+        let path = self.base.join(relative_path);
+        let file = std::fs::File::open(&path).when_reading_file(&path)?;
+        // TODO: what are the safety requirements here?
+        let mmap = unsafe { MmapOptions::new().map(&file) }
+            .when_reading_file(&path)?;
+        Ok(mmap)
     }
 
-    pub(crate) fn mmap_open(
+    pub fn rename(
         &self,
-        relative_path: impl AsRef<Path>,
-    ) -> std::io::Result<Mmap> {
-        let file = self.open(relative_path)?;
-        // TODO: what are the safety requirements here?
-        let mmap = unsafe { MmapOptions::new().map(&file) }?;
-        Ok(mmap)
+        relative_from: impl AsRef<Path>,
+        relative_to: impl AsRef<Path>,
+    ) -> Result<(), HgError> {
+        let from = self.join(relative_from);
+        let to = self.join(relative_to);
+        std::fs::rename(&from, &to)
+            .with_context(|| IoErrorContext::RenamingFile { from, to })
     }
 }
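
`Repo::new_at_path` above derives the store location from the `shared`/`relshared` requirements. A condensed, dependency-free sketch of just that decision (error handling, the share-safe checks, and config reading are omitted, and the `sharedpath` file contents are passed in directly):

```rust
use std::path::{Path, PathBuf};

// Condensed sketch: a non-shared repo uses `.hg/store`; a shared repo
// follows `.hg/sharedpath`, interpreted relative to `.hg` when the
// `relshared` requirement is present.
fn store_path(
    dot_hg: &Path,
    shared: bool,
    relative: bool,
    sharedpath_contents: &str,
) -> PathBuf {
    if !shared {
        return dot_hg.join("store");
    }
    let mut shared_path = PathBuf::from(sharedpath_contents.trim_end());
    if relative {
        shared_path = dot_hg.join(shared_path);
    }
    shared_path.join("store")
}

fn main() {
    let dot_hg = Path::new("/work/.hg");
    assert_eq!(
        store_path(dot_hg, false, false, ""),
        Path::new("/work/.hg/store")
    );
    assert_eq!(
        store_path(dot_hg, true, true, "../src/.hg\n"),
        Path::new("/work/.hg/../src/.hg/store")
    );
}
```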
--- a/rust/hg-core/src/requirements.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/requirements.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,19 +1,9 @@
-use crate::repo::Repo;
-use std::io;
+use crate::errors::{HgError, HgResultExt};
+use crate::repo::{Repo, Vfs};
+use crate::utils::join_display;
+use std::collections::HashSet;
 
-#[derive(Debug)]
-pub enum RequirementsError {
-    // TODO: include a path?
-    Io(io::Error),
-    /// The `requires` file is corrupted
-    Corrupted,
-    /// The repository requires a feature that we don't support
-    Unsupported {
-        feature: String,
-    },
-}
-
-fn parse(bytes: &[u8]) -> Result<Vec<String>, ()> {
+fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
     // The Python code reading this file uses `str.splitlines`
     // which looks for a number of line separators (even including a couple of
     // non-ASCII ones), but Python code writing it always uses `\n`.
@@ -27,16 +17,20 @@
             if line[0].is_ascii_alphanumeric() && line.is_ascii() {
                 Ok(String::from_utf8(line.into()).unwrap())
             } else {
-                Err(())
+                Err(HgError::corrupted("parse error in 'requires' file"))
             }
         })
         .collect()
 }
 
-pub fn load(repo: &Repo) -> Result<Vec<String>, RequirementsError> {
-    match repo.hg_vfs().read("requires") {
-        Ok(bytes) => parse(&bytes).map_err(|()| RequirementsError::Corrupted),
+pub(crate) fn load(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+    parse(&hg_vfs.read("requires")?)
+}
 
+pub(crate) fn load_if_exists(hg_vfs: Vfs) -> Result<HashSet<String>, HgError> {
+    if let Some(bytes) = hg_vfs.read("requires").io_not_found_as_none()? {
+        parse(&bytes)
+    } else {
         // Treat a missing file the same as an empty file.
         // From `mercurial/localrepo.py`:
         // > requires file contains a newline-delimited list of
@@ -44,33 +38,116 @@
         // > the repository. This file was introduced in Mercurial 0.9.2,
         // > which means very old repositories may not have one. We assume
         // > a missing file translates to no requirements.
-        Err(error) if error.kind() == std::io::ErrorKind::NotFound => {
-            Ok(Vec::new())
-        }
-
-        Err(error) => Err(RequirementsError::Io(error))?,
+        Ok(HashSet::new())
     }
 }
 
-pub fn check(repo: &Repo) -> Result<(), RequirementsError> {
-    for feature in load(repo)? {
-        if !SUPPORTED.contains(&&*feature) {
-            return Err(RequirementsError::Unsupported { feature });
-        }
+pub(crate) fn check(repo: &Repo) -> Result<(), HgError> {
+    let unknown: Vec<_> = repo
+        .requirements()
+        .iter()
+        .map(String::as_str)
+        // .filter(|feature| !ALL_SUPPORTED.contains(feature.as_str()))
+        .filter(|feature| {
+            !REQUIRED.contains(feature) && !SUPPORTED.contains(feature)
+        })
+        .collect();
+    if !unknown.is_empty() {
+        return Err(HgError::unsupported(format!(
+            "repository requires feature unknown to this Mercurial: {}",
+            join_display(&unknown, ", ")
+        )));
+    }
+    let missing: Vec<_> = REQUIRED
+        .iter()
+        .filter(|&&feature| !repo.requirements().contains(feature))
+        .collect();
+    if !missing.is_empty() {
+        return Err(HgError::unsupported(format!(
+            "repository is missing feature required by this Mercurial: {}",
+            join_display(&missing, ", ")
+        )));
     }
     Ok(())
 }
 
-// TODO: set this to actually-supported features
+/// rhg does not support repositories that are *missing* any of these features
+const REQUIRED: &[&str] = &["revlogv1", "store", "fncache", "dotencode"];
+
+/// rhg supports repositories with or without these features
 const SUPPORTED: &[&str] = &[
-    "dotencode",
-    "fncache",
     "generaldelta",
-    "revlogv1",
-    "sparserevlog",
-    "store",
+    SHARED_REQUIREMENT,
+    SHARESAFE_REQUIREMENT,
+    SPARSEREVLOG_REQUIREMENT,
+    RELATIVE_SHARED_REQUIREMENT,
     // As of this writing everything rhg does is read-only.
     // When it starts writing to the repository, it’ll need to either keep the
     // persistent nodemap up to date or remove this entry:
-    "persistent-nodemap",
+    NODEMAP_REQUIREMENT,
 ];
+
+// Copied from mercurial/requirements.py:
+
+/// When narrowing is finalized and no longer subject to format changes,
+/// we should move this to just "narrow" or similar.
+#[allow(unused)]
+pub(crate) const NARROW_REQUIREMENT: &str = "narrowhg-experimental";
+
+/// Enables sparse working directory usage
+#[allow(unused)]
+pub(crate) const SPARSE_REQUIREMENT: &str = "exp-sparse";
+
+/// Enables the internal phase which is used to hide changesets instead
+/// of stripping them
+#[allow(unused)]
+pub(crate) const INTERNAL_PHASE_REQUIREMENT: &str = "internal-phase";
+
+/// Stores manifest in Tree structure
+#[allow(unused)]
+pub(crate) const TREEMANIFEST_REQUIREMENT: &str = "treemanifest";
+
+/// Increment the sub-version when the revlog v2 format changes to lock out old
+/// clients.
+#[allow(unused)]
+pub(crate) const REVLOGV2_REQUIREMENT: &str = "exp-revlogv2.1";
+
+/// A repository with the sparserevlog feature will have delta chains that
+/// can spread over a larger span. Sparse reading cuts these large spans into
+/// pieces, so that each piece isn't too big.
+/// Without the sparserevlog capability, reading from the repository could use
+/// huge amounts of memory, because the whole span would be read at once,
+/// including all the intermediate revisions that aren't pertinent for the
+/// chain. This is why once a repository has enabled sparse-read, it becomes
+/// required.
+#[allow(unused)]
+pub(crate) const SPARSEREVLOG_REQUIREMENT: &str = "sparserevlog";
+
+/// A repository with the sidedataflag requirement allows storing extra
+/// information for revisions without altering their original hashes.
+#[allow(unused)]
+pub(crate) const SIDEDATA_REQUIREMENT: &str = "exp-sidedata-flag";
+
+/// A repository with the copies-sidedata-changeset requirement will store
+/// copies-related information in the changeset's sidedata.
+#[allow(unused)]
+pub(crate) const COPIESSDC_REQUIREMENT: &str = "exp-copies-sidedata-changeset";
+
+/// The repository uses a persistent nodemap for the changelog and the manifest.
+#[allow(unused)]
+pub(crate) const NODEMAP_REQUIREMENT: &str = "persistent-nodemap";
+
+/// Denotes that the current repository is a share
+#[allow(unused)]
+pub(crate) const SHARED_REQUIREMENT: &str = "shared";
+
+/// Denotes that the current repository is a share and that the shared source
+/// path is relative to the current repository root path
+#[allow(unused)]
+pub(crate) const RELATIVE_SHARED_REQUIREMENT: &str = "relshared";
+
+/// A repository with share implemented safely. The repository has separate
+/// store and working-copy requirements, i.e. both `.hg/requires` and
+/// `.hg/store/requires` are present.
+#[allow(unused)]
+pub(crate) const SHARESAFE_REQUIREMENT: &str = "share-safe";
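
`check` formats the offending feature lists with `join_display` from `crate::utils`, which is not shown in this diff. A plausible minimal stand-in (the real helper may differ, e.g. by returning a lazy `impl Display` instead of an owned `String`):

```rust
use std::fmt::Display;

// Minimal stand-in for `crate::utils::join_display`: render each item via
// `Display` and join the results with a separator.
fn join_display<T: Display>(items: &[T], separator: &str) -> String {
    items
        .iter()
        .map(ToString::to_string)
        .collect::<Vec<String>>()
        .join(separator)
}

fn main() {
    let unknown = ["some-future-feature", "another-one"];
    assert_eq!(
        join_display(&unknown, ", "),
        "some-future-feature, another-one"
    );
}
```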
--- a/rust/hg-core/src/revlog.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -9,7 +9,7 @@
 pub mod nodemap;
 mod nodemap_docket;
 pub mod path_encode;
-pub use node::{Node, NodeError, NodePrefix, NodePrefixRef};
+pub use node::{FromHexError, Node, NodePrefix};
 pub mod changelog;
 pub mod index;
 pub mod manifest;
--- a/rust/hg-core/src/revlog/changelog.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/changelog.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,12 +1,13 @@
+use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefixRef;
 use crate::revlog::Revision;
+use crate::revlog::{Node, NodePrefix};
 
 /// A specialized `Revlog` to work with `changelog` data format.
 pub struct Changelog {
     /// The generic `revlog` format.
-    revlog: Revlog,
+    pub(crate) revlog: Revlog,
 }
 
 impl Changelog {
@@ -19,7 +20,7 @@
     /// Return the `ChangelogEntry` for a given node id.
     pub fn get_node(
         &self,
-        node: NodePrefixRef,
+        node: NodePrefix,
     ) -> Result<ChangelogEntry, RevlogError> {
         let rev = self.revlog.get_node_rev(node)?;
         self.get_rev(rev)
@@ -33,6 +34,10 @@
         let bytes = self.revlog.get_rev_data(rev)?;
         Ok(ChangelogEntry { bytes })
     }
+
+    pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
+        Some(self.revlog.index.get_entry(rev)?.hash())
+    }
 }
 
 /// `Changelog` entry which knows how to interpret the `changelog` data bytes.
@@ -53,6 +58,8 @@
     /// Return the node id of the `manifest` referenced by this `changelog`
     /// entry.
     pub fn manifest_node(&self) -> Result<&[u8], RevlogError> {
-        self.lines().next().ok_or(RevlogError::Corrupted)
+        self.lines()
+            .next()
+            .ok_or_else(|| HgError::corrupted("empty changelog entry").into())
     }
 }
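
For context on the corruption error introduced in `manifest_node` above: the first line of a raw changelog entry holds the hex node ID of the corresponding manifest, so an entry without a first line is treated as corruption. A toy illustration of that extraction (the entry layout below is abbreviated):

```rust
// Toy version of what `ChangelogEntry::manifest_node` extracts: the first
// line of the raw entry. `None` stands in for the "empty changelog entry"
// corruption error.
fn manifest_node_line(entry: &[u8]) -> Option<&[u8]> {
    let first = entry.split(|&byte| byte == b'\n').next()?;
    if first.is_empty() {
        None
    } else {
        Some(first)
    }
}

fn main() {
    let entry: &[u8] = b"0123456789abcdeffedcba9876543210deadbeef\n\
        user <user@example.com>\n0 0\n\ncommit message";
    assert_eq!(
        manifest_node_line(entry),
        Some(&b"0123456789abcdeffedcba9876543210deadbeef"[..])
    );
}
```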
--- a/rust/hg-core/src/revlog/index.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/index.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -3,6 +3,7 @@
 
 use byteorder::{BigEndian, ByteOrder};
 
+use crate::errors::HgError;
 use crate::revlog::node::Node;
 use crate::revlog::revlog::RevlogError;
 use crate::revlog::{Revision, NULL_REVISION};
@@ -44,7 +45,8 @@
                     offsets: Some(offsets),
                 })
             } else {
-                Err(RevlogError::Corrupted)
+                Err(HgError::corrupted("unexpected inline revlog length")
+                    .into())
             }
         } else {
             Ok(Self {
--- a/rust/hg-core/src/revlog/manifest.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/manifest.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,6 +1,6 @@
 use crate::repo::Repo;
 use crate::revlog::revlog::{Revlog, RevlogError};
-use crate::revlog::NodePrefixRef;
+use crate::revlog::NodePrefix;
 use crate::revlog::Revision;
 use crate::utils::hg_path::HgPath;
 
@@ -20,7 +20,7 @@
     /// Return the `ManifestEntry` of a given node id.
     pub fn get_node(
         &self,
-        node: NodePrefixRef,
+        node: NodePrefix,
     ) -> Result<ManifestEntry, RevlogError> {
         let rev = self.revlog.get_node_rev(node)?;
         self.get_rev(rev)
--- a/rust/hg-core/src/revlog/node.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/node.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -8,8 +8,10 @@
 //! In the Mercurial code base, it is customary to call "a node" the binary
 //! SHA of a revision.
 
-use hex::{self, FromHex, FromHexError};
+use crate::errors::HgError;
+use bytes_cast::BytesCast;
 use std::convert::{TryFrom, TryInto};
+use std::fmt;
 
 /// The length in bytes of a `Node`
 ///
@@ -29,6 +31,9 @@
 /// see also `NODE_BYTES_LENGTH` about it being private.
 const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH;
 
+/// Default for UI presentation
+const SHORT_PREFIX_DEFAULT_NYBBLES_LENGTH: u8 = 12;
+
 /// Private alias for readability and to ease future change
 type NodeData = [u8; NODE_BYTES_LENGTH];
 
@@ -45,11 +50,10 @@
 /// if they need a loop boundary.
 ///
 /// All methods that create a `Node` either take a type that enforces
-/// the size or fail immediately at runtime with [`ExactLengthRequired`].
+/// the size or return an error at runtime.
 ///
 /// [`nybbles_len`]: #method.nybbles_len
-/// [`ExactLengthRequired`]: struct.NodeError#variant.ExactLengthRequired
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Copy, Clone, Debug, PartialEq, BytesCast, derive_more::From)]
 #[repr(transparent)]
 pub struct Node {
     data: NodeData,
@@ -60,32 +64,49 @@
     data: [0; NODE_BYTES_LENGTH],
 };
 
-impl From<NodeData> for Node {
-    fn from(data: NodeData) -> Node {
-        Node { data }
+/// Return an error if the slice has an unexpected length
+impl<'a> TryFrom<&'a [u8]> for &'a Node {
+    type Error = ();
+
+    #[inline]
+    fn try_from(bytes: &'a [u8]) -> Result<Self, Self::Error> {
+        match Node::from_bytes(bytes) {
+            Ok((node, rest)) if rest.is_empty() => Ok(node),
+            _ => Err(()),
+        }
     }
 }
 
 /// Return an error if the slice has an unexpected length
-impl<'a> TryFrom<&'a [u8]> for &'a Node {
+impl TryFrom<&'_ [u8]> for Node {
     type Error = std::array::TryFromSliceError;
 
     #[inline]
-    fn try_from(bytes: &'a [u8]) -> Result<&'a Node, Self::Error> {
+    fn try_from(bytes: &'_ [u8]) -> Result<Self, Self::Error> {
         let data = bytes.try_into()?;
-        // Safety: `#[repr(transparent)]` makes it ok to "wrap" the target
-        // of a reference to the type of the single field.
-        Ok(unsafe { std::mem::transmute::<&NodeData, &Node>(data) })
+        Ok(Self { data })
     }
 }
 
-#[derive(Debug, PartialEq)]
-pub enum NodeError {
-    ExactLengthRequired(usize, String),
-    PrefixTooLong(String),
-    HexError(FromHexError, String),
+impl From<&'_ NodeData> for Node {
+    #[inline]
+    fn from(data: &'_ NodeData) -> Self {
+        Self { data: *data }
+    }
 }
 
+impl fmt::LowerHex for Node {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        for &byte in &self.data {
+            write!(f, "{:02x}", byte)?
+        }
+        Ok(())
+    }
+}
+
+#[derive(Debug)]
+pub struct FromHexError;
+
 /// Low level utility function, also for prefixes
 fn get_nybble(s: &[u8], i: usize) -> u8 {
     if i % 2 == 0 {
@@ -117,18 +138,26 @@
     ///
     /// To be used in FFI and I/O only, in order to facilitate future
     /// changes of hash format.
-    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Node, NodeError> {
-        Ok(NodeData::from_hex(hex.as_ref())
-            .map_err(|e| NodeError::from((e, hex)))?
-            .into())
+    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Node, FromHexError> {
+        let prefix = NodePrefix::from_hex(hex)?;
+        if prefix.nybbles_len() == NODE_NYBBLES_LENGTH {
+            Ok(Self { data: prefix.data })
+        } else {
+            Err(FromHexError)
+        }
     }
 
-    /// Convert to hexadecimal string representation
+    /// `from_hex`, but for input from an internal file of the repository such
+    /// as a changelog or manifest entry.
     ///
-    /// To be used in FFI and I/O only, in order to facilitate future
-    /// changes of hash format.
-    pub fn encode_hex(&self) -> String {
-        hex::encode(self.data)
+    /// An error is treated as repository corruption.
+    pub fn from_hex_for_repo(hex: impl AsRef<[u8]>) -> Result<Node, HgError> {
+        Self::from_hex(hex.as_ref()).map_err(|FromHexError| {
+            HgError::CorruptedRepository(format!(
+                "Expected a full hexadecimal node ID, found {}",
+                String::from_utf8_lossy(hex.as_ref())
+            ))
+        })
     }
 
     /// Provide access to binary data
@@ -138,17 +167,11 @@
     pub fn as_bytes(&self) -> &[u8] {
         &self.data
     }
-}
 
-impl<T: AsRef<[u8]>> From<(FromHexError, T)> for NodeError {
-    fn from(err_offender: (FromHexError, T)) -> Self {
-        let (err, offender) = err_offender;
-        let offender = String::from_utf8_lossy(offender.as_ref()).into_owned();
-        match err {
-            FromHexError::InvalidStringLength => {
-                NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, offender)
-            }
-            _ => NodeError::HexError(err, offender),
+    pub fn short(&self) -> NodePrefix {
+        NodePrefix {
+            nybbles_len: SHORT_PREFIX_DEFAULT_NYBBLES_LENGTH,
+            data: self.data,
         }
     }
 }
@@ -158,10 +181,14 @@
 /// Since it can potentially come from a hexadecimal representation with
 /// odd length, it needs to carry around whether the last 4 bits are relevant
 /// or not.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Copy, Clone)]
 pub struct NodePrefix {
-    buf: Vec<u8>,
-    is_odd: bool,
+    /// In `1..=NODE_NYBBLES_LENGTH`
+    nybbles_len: u8,
+    /// The first `4 * nybbles_len` bits are used (considering bits within a
+    /// byte in big-endian order: most significant first); the rest are zero.
+    data: NodeData,
 }
 
 impl NodePrefix {
@@ -172,72 +199,42 @@
     ///
     /// To be used in FFI and I/O only, in order to facilitate future
     /// changes of hash format.
-    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, NodeError> {
+    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, FromHexError> {
         let hex = hex.as_ref();
         let len = hex.len();
-        if len > NODE_NYBBLES_LENGTH {
-            return Err(NodeError::PrefixTooLong(
-                String::from_utf8_lossy(hex).to_owned().to_string(),
-            ));
+        if len > NODE_NYBBLES_LENGTH || len == 0 {
+            return Err(FromHexError);
         }
 
-        let is_odd = len % 2 == 1;
-        let even_part = if is_odd { &hex[..len - 1] } else { hex };
-        let mut buf: Vec<u8> =
-            Vec::from_hex(&even_part).map_err(|e| (e, hex))?;
-
-        if is_odd {
-            let latest_char = char::from(hex[len - 1]);
-            let latest_nybble = latest_char.to_digit(16).ok_or_else(|| {
-                (
-                    FromHexError::InvalidHexCharacter {
-                        c: latest_char,
-                        index: len - 1,
-                    },
-                    hex,
-                )
-            })? as u8;
-            buf.push(latest_nybble << 4);
+        let mut data = [0; NODE_BYTES_LENGTH];
+        let mut nybbles_len = 0;
+        for &ascii_byte in hex {
+            let nybble = match char::from(ascii_byte).to_digit(16) {
+                Some(digit) => digit as u8,
+                None => return Err(FromHexError),
+            };
+            // Fill in the upper half of a byte first, then the lower half.
+            let shift = if nybbles_len % 2 == 0 { 4 } else { 0 };
+            data[nybbles_len as usize / 2] |= nybble << shift;
+            nybbles_len += 1;
         }
-        Ok(NodePrefix { buf, is_odd })
+        Ok(Self { data, nybbles_len })
     }
 
-    pub fn borrow(&self) -> NodePrefixRef {
-        NodePrefixRef {
-            buf: &self.buf,
-            is_odd: self.is_odd,
-        }
-    }
-}
-
-#[derive(Clone, Debug, PartialEq)]
-pub struct NodePrefixRef<'a> {
-    buf: &'a [u8],
-    is_odd: bool,
-}
-
-impl<'a> NodePrefixRef<'a> {
-    pub fn len(&self) -> usize {
-        if self.is_odd {
-            self.buf.len() * 2 - 1
-        } else {
-            self.buf.len() * 2
-        }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
+    pub fn nybbles_len(&self) -> usize {
+        self.nybbles_len as _
     }
 
     pub fn is_prefix_of(&self, node: &Node) -> bool {
-        if self.is_odd {
-            let buf = self.buf;
-            let last_pos = buf.len() - 1;
-            node.data.starts_with(buf.split_at(last_pos).0)
-                && node.data[last_pos] >> 4 == buf[last_pos] >> 4
-        } else {
-            node.data.starts_with(self.buf)
+        let full_bytes = self.nybbles_len() / 2;
+        if self.data[..full_bytes] != node.data[..full_bytes] {
+            return false;
         }
+        if self.nybbles_len() % 2 == 0 {
+            return true;
+        }
+        let last = self.nybbles_len() - 1;
+        self.get_nybble(last) == node.get_nybble(last)
     }
 
     /// Retrieve the `i`th half-byte from the prefix.
@@ -245,8 +242,12 @@
     /// This is also the `i`th hexadecimal digit in numeric form,
     /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble).
     pub fn get_nybble(&self, i: usize) -> u8 {
-        assert!(i < self.len());
-        get_nybble(self.buf, i)
+        assert!(i < self.nybbles_len());
+        get_nybble(&self.data, i)
+    }
+
+    fn iter_nybbles(&self) -> impl Iterator<Item = u8> + '_ {
+        (0..self.nybbles_len()).map(move |i| get_nybble(&self.data, i))
     }
 
     /// Return the index of the first nybble that's different from `node`
@@ -257,42 +258,49 @@
     ///
     /// Returned index is as in `get_nybble`, i.e., starting at 0.
     pub fn first_different_nybble(&self, node: &Node) -> Option<usize> {
-        let buf = self.buf;
-        let until = if self.is_odd {
-            buf.len() - 1
-        } else {
-            buf.len()
-        };
-        for (i, item) in buf.iter().enumerate().take(until) {
-            if *item != node.data[i] {
-                return if *item & 0xf0 == node.data[i] & 0xf0 {
-                    Some(2 * i + 1)
-                } else {
-                    Some(2 * i)
-                };
-            }
+        self.iter_nybbles()
+            .zip(NodePrefix::from(*node).iter_nybbles())
+            .position(|(a, b)| a != b)
+    }
+}
+
+impl fmt::LowerHex for NodePrefix {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let full_bytes = self.nybbles_len() / 2;
+        for &byte in &self.data[..full_bytes] {
+            write!(f, "{:02x}", byte)?
         }
-        if self.is_odd && buf[until] & 0xf0 != node.data[until] & 0xf0 {
-            Some(until * 2)
-        } else {
-            None
+        if self.nybbles_len() % 2 == 1 {
+            let last = self.nybbles_len() - 1;
+            write!(f, "{:x}", self.get_nybble(last))?
+        }
+        Ok(())
+    }
+}
+
+/// A shortcut for full `Node` references
+impl From<&'_ Node> for NodePrefix {
+    fn from(node: &'_ Node) -> Self {
+        NodePrefix {
+            nybbles_len: node.nybbles_len() as _,
+            data: node.data,
         }
     }
 }
 
 /// A shortcut for full `Node` references
-impl<'a> From<&'a Node> for NodePrefixRef<'a> {
-    fn from(node: &'a Node) -> Self {
-        NodePrefixRef {
-            buf: &node.data,
-            is_odd: false,
+impl From<Node> for NodePrefix {
+    fn from(node: Node) -> Self {
+        NodePrefix {
+            nybbles_len: node.nybbles_len() as _,
+            data: node.data,
         }
     }
 }
 
-impl PartialEq<Node> for NodePrefixRef<'_> {
+impl PartialEq<Node> for NodePrefix {
     fn eq(&self, other: &Node) -> bool {
-        !self.is_odd && self.buf == other.data
+        Self::from(*other) == *self
     }
 }
 
@@ -300,18 +308,16 @@
 mod tests {
     use super::*;
 
-    fn sample_node() -> Node {
-        let mut data = [0; NODE_BYTES_LENGTH];
-        data.copy_from_slice(&[
+    const SAMPLE_NODE_HEX: &str = "0123456789abcdeffedcba9876543210deadbeef";
+    const SAMPLE_NODE: Node = Node {
+        data: [
             0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
             0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef,
-        ]);
-        data.into()
-    }
+        ],
+    };
 
     /// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH`
-    ///check_hash
-    /// The padding is made with zeros
+    /// The padding is made with zeros.
     pub fn hex_pad_right(hex: &str) -> String {
         let mut res = hex.to_string();
         while res.len() < NODE_NYBBLES_LENGTH {
@@ -320,135 +326,88 @@
         res
     }
 
-    fn sample_node_hex() -> String {
-        hex_pad_right("0123456789abcdeffedcba9876543210deadbeef")
-    }
-
     #[test]
     fn test_node_from_hex() {
-        assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node()));
-
-        let mut short = hex_pad_right("0123");
-        short.pop();
-        short.pop();
-        assert_eq!(
-            Node::from_hex(&short),
-            Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)),
-        );
-
-        let not_hex = hex_pad_right("012... oops");
-        assert_eq!(
-            Node::from_hex(&not_hex),
-            Err(NodeError::HexError(
-                FromHexError::InvalidHexCharacter { c: '.', index: 3 },
-                not_hex,
-            )),
-        );
+        let not_hex = "012... oops";
+        let too_short = "0123";
+        let too_long = format!("{}0", SAMPLE_NODE_HEX);
+        assert_eq!(Node::from_hex(SAMPLE_NODE_HEX).unwrap(), SAMPLE_NODE);
+        assert!(Node::from_hex(not_hex).is_err());
+        assert!(Node::from_hex(too_short).is_err());
+        assert!(Node::from_hex(&too_long).is_err());
     }
 
     #[test]
     fn test_node_encode_hex() {
-        assert_eq!(sample_node().encode_hex(), sample_node_hex());
+        assert_eq!(format!("{:x}", SAMPLE_NODE), SAMPLE_NODE_HEX);
     }
 
     #[test]
-    fn test_prefix_from_hex() -> Result<(), NodeError> {
-        assert_eq!(
-            NodePrefix::from_hex("0e1")?,
-            NodePrefix {
-                buf: vec![14, 16],
-                is_odd: true
-            }
-        );
+    fn test_prefix_from_to_hex() -> Result<(), FromHexError> {
+        assert_eq!(format!("{:x}", NodePrefix::from_hex("0e1")?), "0e1");
+        assert_eq!(format!("{:x}", NodePrefix::from_hex("0e1a")?), "0e1a");
         assert_eq!(
-            NodePrefix::from_hex("0e1a")?,
-            NodePrefix {
-                buf: vec![14, 26],
-                is_odd: false
-            }
+            format!("{:x}", NodePrefix::from_hex(SAMPLE_NODE_HEX)?),
+            SAMPLE_NODE_HEX
         );
-
-        // checking limit case
-        let node_as_vec = sample_node().data.iter().cloned().collect();
-        assert_eq!(
-            NodePrefix::from_hex(sample_node_hex())?,
-            NodePrefix {
-                buf: node_as_vec,
-                is_odd: false
-            }
-        );
-
         Ok(())
     }
 
     #[test]
     fn test_prefix_from_hex_errors() {
-        assert_eq!(
-            NodePrefix::from_hex("testgr"),
-            Err(NodeError::HexError(
-                FromHexError::InvalidHexCharacter { c: 't', index: 0 },
-                "testgr".to_string()
-            ))
-        );
-        let mut long = NULL_NODE.encode_hex();
+        assert!(NodePrefix::from_hex("testgr").is_err());
+        let mut long = format!("{:x}", NULL_NODE);
         long.push('c');
-        match NodePrefix::from_hex(&long)
-            .expect_err("should be refused as too long")
-        {
-            NodeError::PrefixTooLong(s) => assert_eq!(s, long),
-            err => panic!(format!("Should have been TooLong, got {:?}", err)),
-        }
+        assert!(NodePrefix::from_hex(&long).is_err())
     }
 
     #[test]
-    fn test_is_prefix_of() -> Result<(), NodeError> {
+    fn test_is_prefix_of() -> Result<(), FromHexError> {
         let mut node_data = [0; NODE_BYTES_LENGTH];
         node_data[0] = 0x12;
         node_data[1] = 0xca;
         let node = Node::from(node_data);
-        assert!(NodePrefix::from_hex("12")?.borrow().is_prefix_of(&node));
-        assert!(!NodePrefix::from_hex("1a")?.borrow().is_prefix_of(&node));
-        assert!(NodePrefix::from_hex("12c")?.borrow().is_prefix_of(&node));
-        assert!(!NodePrefix::from_hex("12d")?.borrow().is_prefix_of(&node));
+        assert!(NodePrefix::from_hex("12")?.is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("1a")?.is_prefix_of(&node));
+        assert!(NodePrefix::from_hex("12c")?.is_prefix_of(&node));
+        assert!(!NodePrefix::from_hex("12d")?.is_prefix_of(&node));
         Ok(())
     }
 
     #[test]
-    fn test_get_nybble() -> Result<(), NodeError> {
+    fn test_get_nybble() -> Result<(), FromHexError> {
         let prefix = NodePrefix::from_hex("dead6789cafe")?;
-        assert_eq!(prefix.borrow().get_nybble(0), 13);
-        assert_eq!(prefix.borrow().get_nybble(7), 9);
+        assert_eq!(prefix.get_nybble(0), 13);
+        assert_eq!(prefix.get_nybble(7), 9);
         Ok(())
     }
 
     #[test]
     fn test_first_different_nybble_even_prefix() {
         let prefix = NodePrefix::from_hex("12ca").unwrap();
-        let prefref = prefix.borrow();
         let mut node = Node::from([0; NODE_BYTES_LENGTH]);
-        assert_eq!(prefref.first_different_nybble(&node), Some(0));
+        assert_eq!(prefix.first_different_nybble(&node), Some(0));
         node.data[0] = 0x13;
-        assert_eq!(prefref.first_different_nybble(&node), Some(1));
+        assert_eq!(prefix.first_different_nybble(&node), Some(1));
         node.data[0] = 0x12;
-        assert_eq!(prefref.first_different_nybble(&node), Some(2));
+        assert_eq!(prefix.first_different_nybble(&node), Some(2));
         node.data[1] = 0xca;
         // now it is a prefix
-        assert_eq!(prefref.first_different_nybble(&node), None);
+        assert_eq!(prefix.first_different_nybble(&node), None);
     }
 
     #[test]
     fn test_first_different_nybble_odd_prefix() {
         let prefix = NodePrefix::from_hex("12c").unwrap();
-        let prefref = prefix.borrow();
         let mut node = Node::from([0; NODE_BYTES_LENGTH]);
-        assert_eq!(prefref.first_different_nybble(&node), Some(0));
+        assert_eq!(prefix.first_different_nybble(&node), Some(0));
         node.data[0] = 0x13;
-        assert_eq!(prefref.first_different_nybble(&node), Some(1));
+        assert_eq!(prefix.first_different_nybble(&node), Some(1));
         node.data[0] = 0x12;
-        assert_eq!(prefref.first_different_nybble(&node), Some(2));
+        assert_eq!(prefix.first_different_nybble(&node), Some(2));
         node.data[1] = 0xca;
         // now it is a prefix
-        assert_eq!(prefref.first_different_nybble(&node), None);
+        assert_eq!(prefix.first_different_nybble(&node), None);
     }
 }
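
A standalone illustration of the nybble packing done by the rewritten `NodePrefix::from_hex` above: each hex digit fills the upper half of a byte before the lower half, so an odd-length prefix like "12c" occupies two bytes with the unused low nybble left at zero:

```rust
// Standalone re-implementation of the packing loop in
// `NodePrefix::from_hex`, for a 20-byte (40-nybble) node.
fn pack_nybbles(hex: &str) -> ([u8; 20], usize) {
    let mut data = [0u8; 20];
    let mut nybbles_len = 0;
    for c in hex.chars() {
        let nybble = c.to_digit(16).expect("hex digit") as u8;
        // Fill in the upper half of a byte first, then the lower half.
        let shift = if nybbles_len % 2 == 0 { 4 } else { 0 };
        data[nybbles_len / 2] |= nybble << shift;
        nybbles_len += 1;
    }
    (data, nybbles_len)
}

fn main() {
    let (data, nybbles_len) = pack_nybbles("12c");
    assert_eq!(nybbles_len, 3);
    assert_eq!(data[0], 0x12);
    assert_eq!(data[1], 0xc0); // low nybble unused, stays zero
}
```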
 
--- a/rust/hg-core/src/revlog/nodemap.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/nodemap.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -13,31 +13,23 @@
 //! is used in a more abstract context.
 
 use super::{
-    node::NULL_NODE, Node, NodeError, NodePrefix, NodePrefixRef, Revision,
-    RevlogIndex, NULL_REVISION,
+    node::NULL_NODE, Node, NodePrefix, Revision, RevlogIndex, NULL_REVISION,
 };
 
+use bytes_cast::{unaligned, BytesCast};
 use std::cmp::max;
 use std::fmt;
-use std::mem;
+use std::mem::{self, align_of, size_of};
 use std::ops::Deref;
 use std::ops::Index;
-use std::slice;
 
 #[derive(Debug, PartialEq)]
 pub enum NodeMapError {
     MultipleResults,
-    InvalidNodePrefix(NodeError),
     /// A `Revision` stored in the nodemap could not be found in the index
     RevisionNotInIndex(Revision),
 }
 
-impl From<NodeError> for NodeMapError {
-    fn from(err: NodeError) -> Self {
-        NodeMapError::InvalidNodePrefix(err)
-    }
-}
-
 /// Mapping system from Mercurial nodes to revision numbers.
 ///
 /// ## `RevlogIndex` and `NodeMap`
@@ -82,24 +74,9 @@
     fn find_bin<'a>(
         &self,
         idx: &impl RevlogIndex,
-        prefix: NodePrefixRef<'a>,
+        prefix: NodePrefix,
     ) -> Result<Option<Revision>, NodeMapError>;
 
-    /// Find the unique Revision whose `Node` hexadecimal string representation
-    /// starts with a given prefix
-    ///
-    /// If no Revision matches the given prefix, `Ok(None)` is returned.
-    ///
-    /// If several Revisions match the given prefix, a [`MultipleResults`]
-    /// error is returned.
-    fn find_hex(
-        &self,
-        idx: &impl RevlogIndex,
-        prefix: &str,
-    ) -> Result<Option<Revision>, NodeMapError> {
-        self.find_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
-    }
-
     /// Give the size of the shortest node prefix that determines
     /// the revision uniquely.
     ///
@@ -114,19 +91,9 @@
     fn unique_prefix_len_bin<'a>(
         &self,
         idx: &impl RevlogIndex,
-        node_prefix: NodePrefixRef<'a>,
+        node_prefix: NodePrefix,
     ) -> Result<Option<usize>, NodeMapError>;
 
-    /// Same as `unique_prefix_len_bin`, with the hexadecimal representation
-    /// of the prefix as input.
-    fn unique_prefix_len_hex(
-        &self,
-        idx: &impl RevlogIndex,
-        prefix: &str,
-    ) -> Result<Option<usize>, NodeMapError> {
-        self.unique_prefix_len_bin(idx, NodePrefix::from_hex(prefix)?.borrow())
-    }
-
     /// Same as `unique_prefix_len_bin`, with a full `Node` as input
     fn unique_prefix_len_node(
         &self,
@@ -149,7 +116,7 @@
 /// Low level NodeTree [`Blocks`] elements
 ///
 /// These are exactly as they appear in, for instance, persistent storage.
-type RawElement = i32;
+type RawElement = unaligned::I32Be;
 
 /// High level representation of values in NodeTree
 /// [`Blocks`](struct.Block.html)
@@ -168,23 +135,24 @@
     ///
     /// See [`Block`](struct.Block.html) for explanation about the encoding.
     fn from(raw: RawElement) -> Element {
-        if raw >= 0 {
-            Element::Block(raw as usize)
-        } else if raw == -1 {
+        let int = raw.get();
+        if int >= 0 {
+            Element::Block(int as usize)
+        } else if int == -1 {
             Element::None
         } else {
-            Element::Rev(-raw - 2)
+            Element::Rev(-int - 2)
         }
     }
 }
 
 impl From<Element> for RawElement {
     fn from(element: Element) -> RawElement {
-        match element {
+        RawElement::from(match element {
             Element::None => 0,
-            Element::Block(i) => i as RawElement,
+            Element::Block(i) => i as i32,
             Element::Rev(rev) => -rev - 2,
-        }
+        })
     }
 }
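
A worked example of the decoding rule in `From<RawElement> for Element` above: a non-negative raw value is a block index, `-1` marks an absent entry, and any other negative value `v` encodes revision `-v - 2`. This matches the expectations of `test_raw_block` further down:

```rust
// Standalone copy of the decoding rule, using a plain `i32` in place of
// the unaligned big-endian wrapper type.
#[derive(Debug, PartialEq)]
enum Element {
    Block(usize),
    Rev(i32),
    None,
}

fn decode(raw: i32) -> Element {
    if raw >= 0 {
        Element::Block(raw as usize)
    } else if raw == -1 {
        Element::None
    } else {
        Element::Rev(-raw - 2)
    }
}

fn main() {
    assert_eq!(decode(0), Element::Block(0));
    assert_eq!(decode(15), Element::Block(15));
    assert_eq!(decode(-1), Element::None);
    assert_eq!(decode(-2), Element::Rev(0));
    assert_eq!(decode(-3), Element::Rev(1));
}
```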
 
@@ -212,42 +180,24 @@
 /// represented at all, because we want an immutable empty nodetree
 /// to be valid.
 
-#[derive(Copy, Clone)]
-pub struct Block([u8; BLOCK_SIZE]);
+const ELEMENTS_PER_BLOCK: usize = 16; // number of different values in a nybble
 
-/// Not derivable for arrays of length >32 until const generics are stable
-impl PartialEq for Block {
-    fn eq(&self, other: &Self) -> bool {
-        self.0[..] == other.0[..]
-    }
-}
-
-pub const BLOCK_SIZE: usize = 64;
+#[derive(Copy, Clone, BytesCast, PartialEq)]
+#[repr(transparent)]
+pub struct Block([RawElement; ELEMENTS_PER_BLOCK]);
 
 impl Block {
     fn new() -> Self {
-        // -1 in 2's complement to create an absent node
-        let byte: u8 = 255;
-        Block([byte; BLOCK_SIZE])
+        let absent_node = RawElement::from(-1);
+        Block([absent_node; ELEMENTS_PER_BLOCK])
     }
 
     fn get(&self, nybble: u8) -> Element {
-        let index = nybble as usize * mem::size_of::<RawElement>();
-        Element::from(RawElement::from_be_bytes([
-            self.0[index],
-            self.0[index + 1],
-            self.0[index + 2],
-            self.0[index + 3],
-        ]))
+        self.0[nybble as usize].into()
     }
 
     fn set(&mut self, nybble: u8, element: Element) {
-        let values = RawElement::to_be_bytes(element.into());
-        let index = nybble as usize * mem::size_of::<RawElement>();
-        self.0[index] = values[0];
-        self.0[index + 1] = values[1];
-        self.0[index + 2] = values[2];
-        self.0[index + 3] = values[3];
+        self.0[nybble as usize] = element.into()
     }
 }
 
@@ -295,7 +245,7 @@
 /// Return `None` unless the `Node` for `rev` has the given prefix in `index`.
 fn has_prefix_or_none(
     idx: &impl RevlogIndex,
-    prefix: NodePrefixRef,
+    prefix: NodePrefix,
     rev: Revision,
 ) -> Result<Option<Revision>, NodeMapError> {
     idx.node(rev)
@@ -316,7 +266,7 @@
 /// revision is the only one for a *subprefix* of the one being looked up.
 fn validate_candidate(
     idx: &impl RevlogIndex,
-    prefix: NodePrefixRef,
+    prefix: NodePrefix,
     candidate: (Option<Revision>, usize),
 ) -> Result<(Option<Revision>, usize), NodeMapError> {
     let (rev, steps) = candidate;
@@ -398,16 +348,17 @@
         // Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
         // bytes, so this is perfectly safe.
         let bytes = unsafe {
-            // Assert that `Block` hasn't been changed and has no padding
-            let _: [u8; 4 * BLOCK_SIZE] =
-                std::mem::transmute([Block::new(); 4]);
+            // Check for compatible allocation layout.
+            // (Optimized away by constant-folding + dead code elimination.)
+            assert_eq!(size_of::<Block>(), 64);
+            assert_eq!(align_of::<Block>(), 1);
 
             // /!\ Any use of `vec` after this is use-after-free.
             // TODO: use `into_raw_parts` once stabilized
             Vec::from_raw_parts(
                 vec.as_ptr() as *mut u8,
-                vec.len() * BLOCK_SIZE,
-                vec.capacity() * BLOCK_SIZE,
+                vec.len() * size_of::<Block>(),
+                vec.capacity() * size_of::<Block>(),
             )
         };
         (readonly, bytes)
@@ -442,7 +393,7 @@
     /// `NodeTree`).
     fn lookup(
         &self,
-        prefix: NodePrefixRef,
+        prefix: NodePrefix,
     ) -> Result<(Option<Revision>, usize), NodeMapError> {
         for (i, visit_item) in self.visit(prefix).enumerate() {
             if let Some(opt) = visit_item.final_revision() {
@@ -452,10 +403,7 @@
         Err(NodeMapError::MultipleResults)
     }
 
-    fn visit<'n, 'p>(
-        &'n self,
-        prefix: NodePrefixRef<'p>,
-    ) -> NodeTreeVisitor<'n, 'p> {
+    fn visit<'n>(&'n self, prefix: NodePrefix) -> NodeTreeVisitor<'n> {
         NodeTreeVisitor {
             nt: self,
             prefix,
@@ -613,7 +561,7 @@
         amount: usize,
     ) -> Self {
         assert!(buffer.len() >= amount);
-        let len_in_blocks = amount / BLOCK_SIZE;
+        let len_in_blocks = amount / size_of::<Block>();
         NodeTreeBytes {
             buffer,
             len_in_blocks,
@@ -625,18 +573,17 @@
     type Target = [Block];
 
     fn deref(&self) -> &[Block] {
-        unsafe {
-            slice::from_raw_parts(
-                (&self.buffer).as_ptr() as *const Block,
-                self.len_in_blocks,
-            )
-        }
+        Block::slice_from_bytes(&self.buffer, self.len_in_blocks)
+            // `NodeTreeBytes::new` already asserted that `self.buffer` is
+            // large enough.
+            .unwrap()
+            .0
     }
 }
 
-struct NodeTreeVisitor<'n, 'p> {
+struct NodeTreeVisitor<'n> {
     nt: &'n NodeTree,
-    prefix: NodePrefixRef<'p>,
+    prefix: NodePrefix,
     visit: usize,
     nybble_idx: usize,
     done: bool,
@@ -649,11 +596,11 @@
     element: Element,
 }
 
-impl<'n, 'p> Iterator for NodeTreeVisitor<'n, 'p> {
+impl<'n> Iterator for NodeTreeVisitor<'n> {
     type Item = NodeTreeVisitItem;
 
     fn next(&mut self) -> Option<Self::Item> {
-        if self.done || self.nybble_idx >= self.prefix.len() {
+        if self.done || self.nybble_idx >= self.prefix.nybbles_len() {
             return None;
         }
 
@@ -718,18 +665,18 @@
     fn find_bin<'a>(
         &self,
         idx: &impl RevlogIndex,
-        prefix: NodePrefixRef<'a>,
+        prefix: NodePrefix,
     ) -> Result<Option<Revision>, NodeMapError> {
-        validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+        validate_candidate(idx, prefix, self.lookup(prefix)?)
             .map(|(opt, _shortest)| opt)
     }
 
     fn unique_prefix_len_bin<'a>(
         &self,
         idx: &impl RevlogIndex,
-        prefix: NodePrefixRef<'a>,
+        prefix: NodePrefix,
     ) -> Result<Option<usize>, NodeMapError> {
-        validate_candidate(idx, prefix.clone(), self.lookup(prefix)?)
+        validate_candidate(idx, prefix, self.lookup(prefix)?)
             .map(|(opt, shortest)| opt.map(|_rev| shortest))
     }
 }
@@ -774,13 +721,13 @@
         let mut raw = [255u8; 64];
 
         let mut counter = 0;
-        for val in [0, 15, -2, -1, -3].iter() {
-            for byte in RawElement::to_be_bytes(*val).iter() {
+        for val in [0_i32, 15, -2, -1, -3].iter() {
+            for byte in val.to_be_bytes().iter() {
                 raw[counter] = *byte;
                 counter += 1;
             }
         }
-        let block = Block(raw);
+        let (block, _) = Block::from_bytes(&raw).unwrap();
         assert_eq!(block.get(0), Element::Block(0));
         assert_eq!(block.get(1), Element::Block(15));
         assert_eq!(block.get(3), Element::None);
@@ -822,6 +769,10 @@
         ])
     }
 
+    fn hex(s: &str) -> NodePrefix {
+        NodePrefix::from_hex(s).unwrap()
+    }
+
     #[test]
     fn test_nt_debug() {
         let nt = sample_nodetree();
@@ -840,11 +791,11 @@
         pad_insert(&mut idx, 1, "1234deadcafe");
 
         let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
-        assert_eq!(nt.find_hex(&idx, "1")?, Some(1));
-        assert_eq!(nt.find_hex(&idx, "12")?, Some(1));
-        assert_eq!(nt.find_hex(&idx, "1234de")?, Some(1));
-        assert_eq!(nt.find_hex(&idx, "1a")?, None);
-        assert_eq!(nt.find_hex(&idx, "ab")?, None);
+        assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("1a"))?, None);
+        assert_eq!(nt.find_bin(&idx, hex("ab"))?, None);
 
         // and with full binary Nodes
         assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
@@ -861,12 +812,12 @@
 
         let nt = sample_nodetree();
 
-        assert_eq!(nt.find_hex(&idx, "0"), Err(MultipleResults));
-        assert_eq!(nt.find_hex(&idx, "01"), Ok(Some(9)));
-        assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
-        assert_eq!(nt.find_hex(&idx, "00a"), Ok(Some(0)));
-        assert_eq!(nt.unique_prefix_len_hex(&idx, "00a"), Ok(Some(3)));
-        assert_eq!(nt.find_hex(&idx, "000"), Ok(Some(NULL_REVISION)));
+        assert_eq!(nt.find_bin(&idx, hex("0")), Err(MultipleResults));
+        assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(9)));
+        assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
+        assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(0)));
+        assert_eq!(nt.unique_prefix_len_bin(&idx, hex("00a")), Ok(Some(3)));
+        assert_eq!(nt.find_bin(&idx, hex("000")), Ok(Some(NULL_REVISION)));
     }
 
     #[test]
@@ -884,13 +835,13 @@
             root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
             masked_inner_blocks: 1,
         };
-        assert_eq!(nt.find_hex(&idx, "10")?, Some(1));
-        assert_eq!(nt.find_hex(&idx, "c")?, Some(2));
-        assert_eq!(nt.unique_prefix_len_hex(&idx, "c")?, Some(1));
-        assert_eq!(nt.find_hex(&idx, "00"), Err(MultipleResults));
-        assert_eq!(nt.find_hex(&idx, "000")?, Some(NULL_REVISION));
-        assert_eq!(nt.unique_prefix_len_hex(&idx, "000")?, Some(3));
-        assert_eq!(nt.find_hex(&idx, "01")?, Some(9));
+        assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(2));
+        assert_eq!(nt.unique_prefix_len_bin(&idx, hex("c"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
+        assert_eq!(nt.find_bin(&idx, hex("000"))?, Some(NULL_REVISION));
+        assert_eq!(nt.unique_prefix_len_bin(&idx, hex("000"))?, Some(3));
+        assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(9));
         assert_eq!(nt.masked_readonly_blocks(), 2);
         Ok(())
     }
@@ -923,14 +874,14 @@
             &self,
             prefix: &str,
         ) -> Result<Option<Revision>, NodeMapError> {
-            self.nt.find_hex(&self.index, prefix)
+            self.nt.find_bin(&self.index, hex(prefix))
         }
 
         fn unique_prefix_len_hex(
             &self,
             prefix: &str,
         ) -> Result<Option<usize>, NodeMapError> {
-            self.nt.unique_prefix_len_hex(&self.index, prefix)
+            self.nt.unique_prefix_len_bin(&self.index, hex(prefix))
         }
 
         /// Drain `added` and restart a new one
@@ -1108,7 +1059,7 @@
         let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
 
         // only the root block has been changed
-        assert_eq!(bytes.len(), BLOCK_SIZE);
+        assert_eq!(bytes.len(), size_of::<Block>());
         // big endian for -2
         assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
         // big endian for -6
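
The layout assertions above are exactly what make the pointer cast into
`Vec::from_raw_parts` sound. A standalone sketch of the same technique,
with a hypothetical `Block` wrapper rather than hg-core's real type:

use std::mem::{align_of, forget, size_of};

#[derive(Clone)]
#[repr(transparent)]
struct Block([u8; 64]);

/// Reinterpret the allocation of a `Vec<Block>` as a `Vec<u8>`, no copy.
fn blocks_into_bytes(vec: Vec<Block>) -> Vec<u8> {
    // Sound only because `Block` is 64 bytes with alignment 1, so the
    // length, capacity, and alignment below describe the original
    // allocation exactly.
    assert_eq!(size_of::<Block>(), 64);
    assert_eq!(align_of::<Block>(), 1);
    let ptr = vec.as_ptr() as *mut u8;
    let len = vec.len() * size_of::<Block>();
    let cap = vec.capacity() * size_of::<Block>();
    forget(vec); // hand over ownership of the allocation without freeing it
    unsafe { Vec::from_raw_parts(ptr, len, cap) }
}

fn main() {
    let bytes = blocks_into_bytes(vec![Block([0xff; 64]); 2]);
    assert_eq!(bytes.len(), 128);
}
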
--- a/rust/hg-core/src/revlog/nodemap_docket.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,5 +1,7 @@
+use crate::errors::{HgError, HgResultExt};
+use crate::requirements;
+use bytes_cast::{unaligned, BytesCast};
 use memmap::Mmap;
-use std::convert::TryInto;
 use std::path::{Path, PathBuf};
 
 use super::revlog::RevlogError;
@@ -13,6 +15,16 @@
     // TODO: keep here more of the data from `parse()` when we need it
 }
 
+#[derive(BytesCast)]
+#[repr(C)]
+struct DocketHeader {
+    uid_size: u8,
+    _tip_rev: unaligned::U64Be,
+    data_length: unaligned::U64Be,
+    _data_unused: unaligned::U64Be,
+    tip_node_size: unaligned::U64Be,
+}
+
 impl NodeMapDocket {
     /// Return `Ok(None)` when the caller should proceed without a persistent
     /// nodemap:
@@ -27,83 +39,71 @@
         repo: &Repo,
         index_path: &Path,
     ) -> Result<Option<(Self, Mmap)>, RevlogError> {
+        if !repo
+            .requirements()
+            .contains(requirements::NODEMAP_REQUIREMENT)
+        {
+            // If .hg/requires does not opt in, don’t try to open a nodemap
+            return Ok(None);
+        }
+
         let docket_path = index_path.with_extension("n");
-        let docket_bytes = match repo.store_vfs().read(&docket_path) {
-            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
-                return Ok(None)
-            }
-            Err(e) => return Err(RevlogError::IoError(e)),
-            Ok(bytes) => bytes,
+        let docket_bytes = if let Some(bytes) =
+            repo.store_vfs().read(&docket_path).io_not_found_as_none()?
+        {
+            bytes
+        } else {
+            return Ok(None);
         };
 
-        let mut input = if let Some((&ONDISK_VERSION, rest)) =
+        let input = if let Some((&ONDISK_VERSION, rest)) =
             docket_bytes.split_first()
         {
             rest
         } else {
             return Ok(None);
         };
-        let input = &mut input;
 
-        let uid_size = read_u8(input)? as usize;
-        let _tip_rev = read_be_u64(input)?;
+        /// Treat any error as a parse error
+        fn parse<T, E>(result: Result<T, E>) -> Result<T, RevlogError> {
+            result.map_err(|_| {
+                HgError::corrupted("nodemap docket parse error").into()
+            })
+        }
+
+        let (header, rest) = parse(DocketHeader::from_bytes(input))?;
+        let uid_size = header.uid_size as usize;
         // TODO: do we care about overflow for 4 GB+ nodemap files on 32-bit
         // systems?
-        let data_length = read_be_u64(input)? as usize;
-        let _data_unused = read_be_u64(input)?;
-        let tip_node_size = read_be_u64(input)? as usize;
-        let uid = read_bytes(input, uid_size)?;
-        let _tip_node = read_bytes(input, tip_node_size)?;
-
-        let uid =
-            std::str::from_utf8(uid).map_err(|_| RevlogError::Corrupted)?;
+        let tip_node_size = header.tip_node_size.get() as usize;
+        let data_length = header.data_length.get() as usize;
+        let (uid, rest) = parse(u8::slice_from_bytes(rest, uid_size))?;
+        let (_tip_node, _rest) =
+            parse(u8::slice_from_bytes(rest, tip_node_size))?;
+        let uid = parse(std::str::from_utf8(uid))?;
         let docket = NodeMapDocket { data_length };
 
         let data_path = rawdata_path(&docket_path, uid);
-        // TODO: use `std::fs::read` here when the `persistent-nodemap.mmap`
+        // TODO: use `vfs.read()` here when the `persistent-nodemap.mmap`
         // config is false?
-        match repo.store_vfs().mmap_open(&data_path) {
-            Ok(mmap) => {
-                if mmap.len() >= data_length {
-                    Ok(Some((docket, mmap)))
-                } else {
-                    Err(RevlogError::Corrupted)
-                }
+        if let Some(mmap) = repo
+            .store_vfs()
+            .mmap_open(&data_path)
+            .io_not_found_as_none()?
+        {
+            if mmap.len() >= data_length {
+                Ok(Some((docket, mmap)))
+            } else {
+                Err(HgError::corrupted("persistent nodemap too short").into())
             }
-            Err(error) => {
-                if error.kind() == std::io::ErrorKind::NotFound {
-                    Ok(None)
-                } else {
-                    Err(RevlogError::IoError(error))
-                }
-            }
+        } else {
+            // Even if .hg/requires opted in, some revlogs are deemed small
+            // enough to not need a persistent nodemap.
+            Ok(None)
         }
     }
 }
 
-fn read_bytes<'a>(
-    input: &mut &'a [u8],
-    count: usize,
-) -> Result<&'a [u8], RevlogError> {
-    if let Some(start) = input.get(..count) {
-        *input = &input[count..];
-        Ok(start)
-    } else {
-        Err(RevlogError::Corrupted)
-    }
-}
-
-fn read_u8<'a>(input: &mut &[u8]) -> Result<u8, RevlogError> {
-    Ok(read_bytes(input, 1)?[0])
-}
-
-fn read_be_u64<'a>(input: &mut &[u8]) -> Result<u64, RevlogError> {
-    let array = read_bytes(input, std::mem::size_of::<u64>())?
-        .try_into()
-        .unwrap();
-    Ok(u64::from_be_bytes(array))
-}
-
 fn rawdata_path(docket_path: &Path, uid: &str) -> PathBuf {
     let docket_name = docket_path
         .file_name()
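
`DocketHeader` above uses the bytes-cast crate to replace the deleted
`read_u8`/`read_be_u64` cursor helpers with a single zero-copy cast. A
minimal sketch of that pattern, assuming the same bytes-cast 0.1 API and a
hypothetical two-field header rather than the real docket layout:

use bytes_cast::{unaligned, BytesCast};

#[derive(BytesCast)]
#[repr(C)]
struct Header {
    version: u8,
    payload_len: unaligned::U64Be, // big-endian, alignment 1
}

/// Borrow the header and payload in place; fail if `input` is too short.
fn parse(input: &[u8]) -> Option<(&Header, &[u8], &[u8])> {
    let (header, rest) = Header::from_bytes(input).ok()?;
    let len = header.payload_len.get() as usize;
    let (payload, rest) = u8::slice_from_bytes(rest, len).ok()?;
    Some((header, payload, rest))
}

fn main() {
    let mut raw = vec![1u8]; // version
    raw.extend_from_slice(&3u64.to_be_bytes()); // payload_len
    raw.extend_from_slice(b"abcdef"); // 3 payload bytes + 3 trailing
    let (header, payload, rest) = parse(&raw).unwrap();
    assert_eq!(header.version, 1);
    assert_eq!(payload, b"abc");
    assert_eq!(rest, b"def");
}
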
--- a/rust/hg-core/src/revlog/revlog.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/revlog/revlog.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -11,22 +11,37 @@
 use zstd;
 
 use super::index::Index;
-use super::node::{NodePrefixRef, NODE_BYTES_LENGTH, NULL_NODE};
+use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
 use super::nodemap;
-use super::nodemap::NodeMap;
+use super::nodemap::{NodeMap, NodeMapError};
 use super::nodemap_docket::NodeMapDocket;
 use super::patch;
+use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::Revision;
 
+#[derive(derive_more::From)]
 pub enum RevlogError {
-    IoError(std::io::Error),
-    UnsuportedVersion(u16),
     InvalidRevision,
     /// Found more than one entry whose ID match the requested prefix
     AmbiguousPrefix,
-    Corrupted,
-    UnknowDataFormat(u8),
+    #[from]
+    Other(HgError),
+}
+
+impl From<NodeMapError> for RevlogError {
+    fn from(error: NodeMapError) -> Self {
+        match error {
+            NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
+            NodeMapError::RevisionNotInIndex(_) => RevlogError::corrupted(),
+        }
+    }
+}
+
+impl RevlogError {
+    fn corrupted() -> Self {
+        RevlogError::Other(HgError::corrupted("corrupted revlog"))
+    }
 }
 
 /// Read only implementation of revlog.
@@ -34,7 +49,7 @@
     /// When index and data are not interleaved: bytes of the revlog index.
     /// When index and data are interleaved: bytes of the revlog index and
     /// data.
-    index: Index,
+    pub(crate) index: Index,
     /// When index and data are not interleaved: bytes of the revlog data
     data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
     /// When present on disk: the persistent nodemap for this revlog
@@ -53,14 +68,12 @@
         data_path: Option<&Path>,
     ) -> Result<Self, RevlogError> {
         let index_path = index_path.as_ref();
-        let index_mmap = repo
-            .store_vfs()
-            .mmap_open(&index_path)
-            .map_err(RevlogError::IoError)?;
+        let index_mmap = repo.store_vfs().mmap_open(&index_path)?;
 
         let version = get_version(&index_mmap);
         if version != 1 {
-            return Err(RevlogError::UnsuportedVersion(version));
+            // A proper new version should have had a repo/store requirement.
+            return Err(RevlogError::corrupted());
         }
 
         let index = Index::new(Box::new(index_mmap))?;
@@ -74,10 +87,7 @@
                 None
             } else {
                 let data_path = data_path.unwrap_or(&default_data_path);
-                let data_mmap = repo
-                    .store_vfs()
-                    .mmap_open(data_path)
-                    .map_err(RevlogError::IoError)?;
+                let data_mmap = repo.store_vfs().mmap_open(data_path)?;
                 Some(Box::new(data_mmap))
             };
 
@@ -111,13 +121,11 @@
     #[timed]
     pub fn get_node_rev(
         &self,
-        node: NodePrefixRef,
+        node: NodePrefix,
     ) -> Result<Revision, RevlogError> {
         if let Some(nodemap) = &self.nodemap {
             return nodemap
-                .find_bin(&self.index, node)
-                // TODO: propagate details of this error:
-                .map_err(|_| RevlogError::Corrupted)?
+                .find_bin(&self.index, node)?
                 .ok_or(RevlogError::InvalidRevision);
         }
 
@@ -130,7 +138,9 @@
         let mut found_by_prefix = None;
         for rev in (0..self.len() as Revision).rev() {
             let index_entry =
-                self.index.get_entry(rev).ok_or(RevlogError::Corrupted)?;
+                self.index.get_entry(rev).ok_or(HgError::corrupted(
+                    "revlog references a revision not in the index",
+                ))?;
             if node == *index_entry.hash() {
                 return Ok(rev);
             }
@@ -144,6 +154,11 @@
         found_by_prefix.ok_or(RevlogError::InvalidRevision)
     }
 
+    /// Returns whether the given revision exists in this revlog.
+    pub fn has_rev(&self, rev: Revision) -> bool {
+        self.index.get_entry(rev).is_some()
+    }
+
     /// Return the full data associated to a revision.
     ///
     /// All entries required to build the final data out of deltas will be
@@ -156,8 +171,9 @@
         let mut delta_chain = vec![];
         while let Some(base_rev) = entry.base_rev {
             delta_chain.push(entry);
-            entry =
-                self.get_entry(base_rev).or(Err(RevlogError::Corrupted))?;
+            entry = self
+                .get_entry(base_rev)
+                .map_err(|_| RevlogError::corrupted())?;
         }
 
         // TODO do not look twice in the index
@@ -180,7 +196,7 @@
         ) {
             Ok(data)
         } else {
-            Err(RevlogError::Corrupted)
+            Err(RevlogError::corrupted())
         }
     }
 
@@ -290,7 +306,8 @@
             b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
             // zstd data.
             b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
-            format_type => Err(RevlogError::UnknowDataFormat(format_type)),
+            // A proper new format should have had a repo/store requirement.
+            _format_type => Err(RevlogError::corrupted()),
         }
     }
 
@@ -300,13 +317,13 @@
             let mut buf = Vec::with_capacity(self.compressed_len);
             decoder
                 .read_to_end(&mut buf)
-                .or(Err(RevlogError::Corrupted))?;
+                .map_err(|_| RevlogError::corrupted())?;
             Ok(buf)
         } else {
             let mut buf = vec![0; self.uncompressed_len];
             decoder
                 .read_exact(&mut buf)
-                .or(Err(RevlogError::Corrupted))?;
+                .map_err(|_| RevlogError::corrupted())?;
             Ok(buf)
         }
     }
@@ -315,14 +332,14 @@
         if self.is_delta() {
             let mut buf = Vec::with_capacity(self.compressed_len);
             zstd::stream::copy_decode(self.bytes, &mut buf)
-                .or(Err(RevlogError::Corrupted))?;
+                .map_err(|_| RevlogError::corrupted())?;
             Ok(buf)
         } else {
             let mut buf = vec![0; self.uncompressed_len];
             let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
-                .or(Err(RevlogError::Corrupted))?;
+                .map_err(|_| RevlogError::corrupted())?;
             if len != self.uncompressed_len {
-                Err(RevlogError::Corrupted)
+                Err(RevlogError::corrupted())
             } else {
                 Ok(buf)
             }
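
`#[derive(derive_more::From)]` is what lets the bare `?` operator in this
file convert an `HgError` into a `RevlogError` without `map_err`. A reduced
sketch of the derived conversion, using stand-in types and assuming
derive_more 0.99 as elsewhere in this patch:

use derive_more::From;

#[derive(Debug)]
struct HgError(String);

#[derive(Debug, From)]
enum RevlogError {
    InvalidRevision,
    #[from]
    Other(HgError),
}

fn read_index() -> Result<(), RevlogError> {
    let io: Result<(), HgError> = Err(HgError("corrupted revlog".into()));
    // `?` applies the derived `From<HgError> for RevlogError` impl.
    io?;
    Ok(())
}

fn main() {
    assert!(matches!(read_index(), Err(RevlogError::Other(_))));
    let _ = RevlogError::InvalidRevision; // other variants get no From impl
}
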
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revset.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,57 @@
+//! The revset query language
+//!
+//! <https://www.mercurial-scm.org/repo/hg/help/revsets>
+
+use crate::errors::HgError;
+use crate::repo::Repo;
+use crate::revlog::changelog::Changelog;
+use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::NodePrefix;
+use crate::revlog::{Revision, NULL_REVISION};
+
+/// Resolve a query string into a single revision.
+///
+/// Only some of the revset language is implemented so far.
+pub fn resolve_single(
+    input: &str,
+    repo: &Repo,
+) -> Result<Revision, RevlogError> {
+    let changelog = Changelog::open(repo)?;
+
+    match resolve_rev_number_or_hex_prefix(input, &changelog.revlog) {
+        Err(RevlogError::InvalidRevision) => {} // Try other syntax
+        result => return result,
+    }
+
+    if input == "null" {
+        return Ok(NULL_REVISION);
+    }
+
+    // TODO: support for the rest of the language here.
+
+    Err(
+        HgError::unsupported(format!("cannot parse revset '{}'", input))
+            .into(),
+    )
+}
+
+/// Resolve the small subset of the language suitable for revlogs other than
+/// the changelog, such as the CLI argument of `hg debugdata --manifest`.
+///
+/// * A non-negative decimal integer for a revision number, or
+/// * A hexadecimal string, for the unique node ID that starts with this
+///   prefix
+pub fn resolve_rev_number_or_hex_prefix(
+    input: &str,
+    revlog: &Revlog,
+) -> Result<Revision, RevlogError> {
+    if let Ok(integer) = input.parse::<i32>() {
+        if integer >= 0 && revlog.has_rev(integer) {
+            return Ok(integer);
+        }
+    }
+    if let Ok(prefix) = NodePrefix::from_hex(input) {
+        return revlog.get_node_rev(prefix);
+    }
+    Err(RevlogError::InvalidRevision)
+}
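
Note the precedence encoded here: an all-digit string such as "10" resolves
as a revision number when that revision exists, and only otherwise falls
through to node-prefix lookup. A standalone sketch of the dispatch, with
the revlog queries stubbed out (names hypothetical):

/// Stand-in for the revlog queries used by the real resolver.
struct FakeRevlog {
    len: i32,
}

impl FakeRevlog {
    fn has_rev(&self, rev: i32) -> bool {
        (0..self.len).contains(&rev)
    }

    fn rev_for_hex_prefix(&self, _prefix: &str) -> Option<i32> {
        None // a real revlog would consult the nodemap or scan the index
    }
}

fn resolve(input: &str, revlog: &FakeRevlog) -> Option<i32> {
    // Try a non-negative decimal revision number first,
    if let Ok(integer) = input.parse::<i32>() {
        if integer >= 0 && revlog.has_rev(integer) {
            return Some(integer);
        }
    }
    // then fall back to treating the input as a hex node-ID prefix.
    revlog.rev_for_hex_prefix(input)
}

fn main() {
    let revlog = FakeRevlog { len: 5 };
    assert_eq!(resolve("3", &revlog), Some(3));
    // "10" is a valid number but rev 10 does not exist; it is then tried
    // (and here fails) as a hexadecimal prefix.
    assert_eq!(resolve("10", &revlog), None);
}
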
--- a/rust/hg-core/src/utils.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/utils.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -7,7 +7,12 @@
 
 //! Contains useful functions, traits, structs, etc. for use in core.
 
+use crate::errors::{HgError, IoErrorContext};
 use crate::utils::hg_path::HgPath;
+use im_rc::ordmap::DiffItem;
+use im_rc::ordmap::OrdMap;
+use std::cell::Cell;
+use std::fmt;
 use std::{io::Write, ops::Deref};
 
 pub mod files;
@@ -62,10 +67,12 @@
 }
 
 pub trait SliceExt {
+    fn trim_end_newlines(&self) -> &Self;
     fn trim_end(&self) -> &Self;
     fn trim_start(&self) -> &Self;
     fn trim(&self) -> &Self;
     fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
+    fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])>;
 }
 
 #[allow(clippy::trivially_copy_pass_by_ref)]
@@ -74,6 +81,13 @@
 }
 
 impl SliceExt for [u8] {
+    fn trim_end_newlines(&self) -> &[u8] {
+        if let Some(last) = self.iter().rposition(|&byte| byte != b'\n') {
+            &self[..=last]
+        } else {
+            &[]
+        }
+    }
     fn trim_end(&self) -> &[u8] {
         if let Some(last) = self.iter().rposition(is_not_whitespace) {
             &self[..=last]
@@ -115,6 +129,13 @@
             None
         }
     }
+
+    fn split_2(&self, separator: u8) -> Option<(&[u8], &[u8])> {
+        let mut iter = self.splitn(2, |&byte| byte == separator);
+        let a = iter.next()?;
+        let b = iter.next()?;
+        Some((a, b))
+    }
 }
 
 pub trait Escaped {
@@ -176,3 +197,287 @@
         None
     }
 }
+
+#[cfg(unix)]
+pub fn shell_quote(value: &[u8]) -> Vec<u8> {
+    // TODO: Use the `matches!` macro when we require Rust 1.42+
+    if value.iter().all(|&byte| match byte {
+        b'a'..=b'z'
+        | b'A'..=b'Z'
+        | b'0'..=b'9'
+        | b'.'
+        | b'_'
+        | b'/'
+        | b'+'
+        | b'-' => true,
+        _ => false,
+    }) {
+        value.to_owned()
+    } else {
+        let mut quoted = Vec::with_capacity(value.len() + 2);
+        quoted.push(b'\'');
+        for &byte in value {
+            if byte == b'\'' {
+                quoted.push(b'\\');
+            }
+            quoted.push(byte);
+        }
+        quoted.push(b'\'');
+        quoted
+    }
+}
+
+pub fn current_dir() -> Result<std::path::PathBuf, HgError> {
+    std::env::current_dir().map_err(|error| HgError::IoError {
+        error,
+        context: IoErrorContext::CurrentDir,
+    })
+}
+
+pub fn current_exe() -> Result<std::path::PathBuf, HgError> {
+    std::env::current_exe().map_err(|error| HgError::IoError {
+        error,
+        context: IoErrorContext::CurrentExe,
+    })
+}
+
+/// Expand `$FOO` and `${FOO}` environment variables in the given byte string
+pub fn expand_vars(s: &[u8]) -> std::borrow::Cow<[u8]> {
+    lazy_static::lazy_static! {
+        /// https://github.com/python/cpython/blob/3.9/Lib/posixpath.py#L301
+        /// The `x` makes whitespace ignored.
+        /// `-u` disables the Unicode flag, so `\w` matches like Python’s
+        /// `\w` with the ASCII flag.
+        static ref VAR_RE: regex::bytes::Regex =
+            regex::bytes::Regex::new(r"(?x-u)
+                \$
+                (?:
+                    (\w+)
+                    |
+                    \{
+                        ([^}]*)
+                    \}
+                )
+            ").unwrap();
+    }
+    VAR_RE.replace_all(s, |captures: &regex::bytes::Captures| {
+        let var_name = files::get_os_str_from_bytes(
+            captures
+                .get(1)
+                .or_else(|| captures.get(2))
+                .expect("either side of `|` must participate in match")
+                .as_bytes(),
+        );
+        std::env::var_os(var_name)
+            .map(files::get_bytes_from_os_str)
+            .unwrap_or_else(|| {
+                // Referencing an environment variable that does not exist.
+                // Leave the $FOO reference as-is.
+                captures[0].to_owned()
+            })
+    })
+}
+
+#[test]
+fn test_expand_vars() {
+    // Modifying process-global state in a test isn’t great,
+    // but hopefully this won’t collide with anything.
+    std::env::set_var("TEST_EXPAND_VAR", "1");
+    assert_eq!(
+        expand_vars(b"before/$TEST_EXPAND_VAR/after"),
+        &b"before/1/after"[..]
+    );
+    assert_eq!(
+        expand_vars(b"before${TEST_EXPAND_VAR}${TEST_EXPAND_VAR}${TEST_EXPAND_VAR}after"),
+        &b"before111after"[..]
+    );
+    let s = b"before $SOME_LONG_NAME_THAT_WE_ASSUME_IS_NOT_AN_ACTUAL_ENV_VAR after";
+    assert_eq!(expand_vars(s), &s[..]);
+}
+
+pub(crate) enum MergeResult<V> {
+    UseLeftValue,
+    UseRightValue,
+    UseNewValue(V),
+}
+
+/// Return the union of the two given maps,
+/// calling `merge(key, left_value, right_value)` to resolve keys that exist in
+/// both.
+///
+/// CC https://github.com/bodil/im-rs/issues/166
+pub(crate) fn ordmap_union_with_merge<K, V>(
+    left: OrdMap<K, V>,
+    right: OrdMap<K, V>,
+    mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+    K: Clone + Ord,
+    V: Clone + PartialEq,
+{
+    if left.ptr_eq(&right) {
+        // One of the two maps is an unmodified clone of the other
+        left
+    } else if left.len() / 2 > right.len() {
+        // When two maps have different sizes,
+        // their size difference is a lower bound on
+        // how many keys of the larger map are not also in the smaller map.
+        // This in turn is a lower bound on the number of differences in
+        // `OrdMap::diff` and the "amount of work" that would be done
+        // by `ordmap_union_with_merge_by_diff`.
+        //
+        // Here `left` is more than twice the size of `right`,
+        // so the number of differences is more than the total size of
+        // `right`. Therefore an algorithm based on iterating `right`
+        // is more efficient.
+        //
+        // This helps a lot when a tiny (or empty) map is merged
+        // with a large one.
+        ordmap_union_with_merge_by_iter(left, right, merge)
+    } else if left.len() < right.len() / 2 {
+        // Same as above but with `left` and `right` swapped
+        ordmap_union_with_merge_by_iter(right, left, |key, a, b| {
+            // Also swapped in `merge` arguments:
+            match merge(key, b, a) {
+                MergeResult::UseNewValue(v) => MergeResult::UseNewValue(v),
+                // … and swap back in `merge` result:
+                MergeResult::UseLeftValue => MergeResult::UseRightValue,
+                MergeResult::UseRightValue => MergeResult::UseLeftValue,
+            }
+        })
+    } else {
+        // For maps of similar size, use the algorithm based on `OrdMap::diff`
+        ordmap_union_with_merge_by_diff(left, right, merge)
+    }
+}
+
+/// Efficient if `right` is much smaller than `left`
+fn ordmap_union_with_merge_by_iter<K, V>(
+    mut left: OrdMap<K, V>,
+    right: OrdMap<K, V>,
+    mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+    K: Clone + Ord,
+    V: Clone,
+{
+    for (key, right_value) in right {
+        match left.get(&key) {
+            None => {
+                left.insert(key, right_value);
+            }
+            Some(left_value) => match merge(&key, left_value, &right_value) {
+                MergeResult::UseLeftValue => {}
+                MergeResult::UseRightValue => {
+                    left.insert(key, right_value);
+                }
+                MergeResult::UseNewValue(new_value) => {
+                    left.insert(key, new_value);
+                }
+            },
+        }
+    }
+    left
+}
+
+/// Fallback when both maps are of similar size
+fn ordmap_union_with_merge_by_diff<K, V>(
+    mut left: OrdMap<K, V>,
+    mut right: OrdMap<K, V>,
+    mut merge: impl FnMut(&K, &V, &V) -> MergeResult<V>,
+) -> OrdMap<K, V>
+where
+    K: Clone + Ord,
+    V: Clone + PartialEq,
+{
+    // (key, value) pairs that would need to be inserted in either map
+    // in order to turn it into the union.
+    //
+    // TODO: if/when https://github.com/bodil/im-rs/pull/168 is accepted,
+    // change these from `Vec<(K, V)>` to `Vec<(&K, Cow<V>)>`
+    // with `left_updates` only borrowing from `right` and `right_updates` from
+    // `left`, and with `Cow::Owned` used for `MergeResult::UseNewValue`.
+    //
+    // This would allow moving all `.clone()` calls to after we’ve decided
+    // which of `right_updates` or `left_updates` to use
+    // (value ones becoming `Cow::into_owned`),
+    // and avoid making clones we don’t end up using.
+    let mut left_updates = Vec::new();
+    let mut right_updates = Vec::new();
+
+    for difference in left.diff(&right) {
+        match difference {
+            DiffItem::Add(key, value) => {
+                left_updates.push((key.clone(), value.clone()))
+            }
+            DiffItem::Remove(key, value) => {
+                right_updates.push((key.clone(), value.clone()))
+            }
+            DiffItem::Update {
+                old: (key, left_value),
+                new: (_, right_value),
+            } => match merge(key, left_value, right_value) {
+                MergeResult::UseLeftValue => {
+                    right_updates.push((key.clone(), left_value.clone()))
+                }
+                MergeResult::UseRightValue => {
+                    left_updates.push((key.clone(), right_value.clone()))
+                }
+                MergeResult::UseNewValue(new_value) => {
+                    left_updates.push((key.clone(), new_value.clone()));
+                    right_updates.push((key.clone(), new_value))
+                }
+            },
+        }
+    }
+    if left_updates.len() < right_updates.len() {
+        for (key, value) in left_updates {
+            left.insert(key, value);
+        }
+        left
+    } else {
+        for (key, value) in right_updates {
+            right.insert(key, value);
+        }
+        right
+    }
+}
+
+/// Join items of the iterable with the given separator, similar to Python’s
+/// `separator.join(iter)`.
+///
+/// Formatting the return value consumes the iterator.
+/// Formatting it again will produce an empty string.
+pub fn join_display(
+    iter: impl IntoIterator<Item = impl fmt::Display>,
+    separator: impl fmt::Display,
+) -> impl fmt::Display {
+    JoinDisplay {
+        iter: Cell::new(Some(iter.into_iter())),
+        separator,
+    }
+}
+
+struct JoinDisplay<I, S> {
+    iter: Cell<Option<I>>,
+    separator: S,
+}
+
+impl<I, T, S> fmt::Display for JoinDisplay<I, S>
+where
+    I: Iterator<Item = T>,
+    T: fmt::Display,
+    S: fmt::Display,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Some(mut iter) = self.iter.take() {
+            if let Some(first) = iter.next() {
+                first.fmt(f)?;
+            }
+            for value in iter {
+                self.separator.fmt(f)?;
+                value.fmt(f)?;
+            }
+        }
+        Ok(())
+    }
+}
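
The new `trim_end_newlines` and `split_2` helpers are small but have edge
cases worth pinning down. A standalone sketch with equivalent free
functions and the expected behaviour:

fn trim_end_newlines(bytes: &[u8]) -> &[u8] {
    match bytes.iter().rposition(|&byte| byte != b'\n') {
        Some(last) => &bytes[..=last],
        None => &[],
    }
}

fn split_2(bytes: &[u8], separator: u8) -> Option<(&[u8], &[u8])> {
    let mut iter = bytes.splitn(2, |&byte| byte == separator);
    Some((iter.next()?, iter.next()?))
}

fn main() {
    // Only trailing newlines are removed; other whitespace is kept.
    assert_eq!(trim_end_newlines(b"key value \n\n"), b"key value ");
    assert_eq!(trim_end_newlines(b"\n\n"), b"");
    // `split_2` splits on the first separator only, and returns `None`
    // when the separator is absent.
    let (key, value) = split_2(b"key=value=x", b'=').unwrap();
    assert_eq!(key, b"key");
    assert_eq!(value, b"value=x");
    assert_eq!(split_2(b"novalue", b'='), None);
}
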
--- a/rust/hg-core/src/utils/files.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/utils/files.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -17,13 +17,13 @@
 use lazy_static::lazy_static;
 use same_file::is_same_file;
 use std::borrow::{Cow, ToOwned};
+use std::ffi::OsStr;
 use std::fs::Metadata;
-use std::io::Read;
 use std::iter::FusedIterator;
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
 
-pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
+pub fn get_os_str_from_bytes(bytes: &[u8]) -> &OsStr {
     let os_str;
     #[cfg(unix)]
     {
@@ -33,16 +33,24 @@
     // TODO Handle other platforms
     // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
     // Perhaps, the return type would have to be Result<PathBuf>.
+    os_str
+}
 
-    Path::new(os_str)
+pub fn get_path_from_bytes(bytes: &[u8]) -> &Path {
+    Path::new(get_os_str_from_bytes(bytes))
 }
 
 // TODO: need to convert from WTF8 to MBCS bytes on Windows.
 // that's why Vec<u8> is returned.
 #[cfg(unix)]
 pub fn get_bytes_from_path(path: impl AsRef<Path>) -> Vec<u8> {
+    get_bytes_from_os_str(path.as_ref())
+}
+
+#[cfg(unix)]
+pub fn get_bytes_from_os_str(str: impl AsRef<OsStr>) -> Vec<u8> {
     use std::os::unix::ffi::OsStrExt;
-    path.as_ref().as_os_str().as_bytes().to_vec()
+    str.as_ref().as_bytes().to_vec()
 }
 
 /// An iterator over repository path yielding itself and its ancestors.
@@ -191,6 +199,12 @@
             st_ctime: metadata.ctime(),
         }
     }
+
+    pub fn is_symlink(&self) -> bool {
+        // This is way too manual, but `HgMetadata` will go away in the
+        // near-future dirstate rewrite anyway.
+        self.st_mode & 0o170000 == 0o120000
+    }
 }
 
 /// Returns the canonical path of `name`, given `cwd` and `root`
@@ -276,7 +290,13 @@
     if cwd.as_ref().is_empty() {
         Cow::Borrowed(path.as_bytes())
     } else {
-        let mut res: Vec<u8> = Vec::new();
+        // This is not entirely accurate as to how large `res` will be, but
+        // profiling `rhg files` on a large-ish repo shows it’s better than
+        // starting from a zero-capacity `Vec` and letting `extend` reallocate
+        // repeatedly.
+        let guesstimate = path.as_bytes().len();
+
+        let mut res: Vec<u8> = Vec::with_capacity(guesstimate);
         let mut path_iter = path.as_bytes().split(|b| *b == b'/').peekable();
         let mut cwd_iter =
             cwd.as_ref().as_bytes().split(|b| *b == b'/').peekable();
@@ -309,17 +329,6 @@
     }
 }
 
-/// Reads a file in one big chunk instead of doing multiple reads
-pub fn read_whole_file(filepath: &Path) -> std::io::Result<Vec<u8>> {
-    let mut file = std::fs::File::open(filepath)?;
-    let size = file.metadata()?.len();
-
-    let mut res = vec![0; size as usize];
-    file.read_exact(&mut res)?;
-
-    Ok(res)
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
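
For reference, `0o170000` is the POSIX `S_IFMT` file-type mask and
`0o120000` is `S_IFLNK`; the `0o` prefix matters in Rust, where `0170000`
would parse as decimal. A quick check of the masking logic used by
`is_symlink`:

const S_IFMT: u32 = 0o170000; // POSIX file-type mask
const S_IFLNK: u32 = 0o120000; // symbolic link
const S_IFREG: u32 = 0o100000; // regular file

fn is_symlink(st_mode: u32) -> bool {
    st_mode & S_IFMT == S_IFLNK
}

fn main() {
    assert!(is_symlink(S_IFLNK | 0o777)); // symlink, rwxrwxrwx mode bits
    assert!(!is_symlink(S_IFREG | 0o644)); // regular file
}
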
--- a/rust/hg-core/src/utils/hg_path.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-core/src/utils/hg_path.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -47,57 +47,68 @@
     },
 }
 
-impl ToString for HgPathError {
-    fn to_string(&self) -> String {
+impl fmt::Display for HgPathError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
             HgPathError::LeadingSlash(bytes) => {
-                format!("Invalid HgPath '{:?}': has a leading slash.", bytes)
+                write!(f, "Invalid HgPath '{:?}': has a leading slash.", bytes)
             }
             HgPathError::ConsecutiveSlashes {
                 bytes,
                 second_slash_index: pos,
-            } => format!(
+            } => write!(
+                f,
                 "Invalid HgPath '{:?}': consecutive slashes at pos {}.",
                 bytes, pos
             ),
             HgPathError::ContainsNullByte {
                 bytes,
                 null_byte_index: pos,
-            } => format!(
+            } => write!(
+                f,
                 "Invalid HgPath '{:?}': contains null byte at pos {}.",
                 bytes, pos
             ),
-            HgPathError::DecodeError(bytes) => {
-                format!("Invalid HgPath '{:?}': could not be decoded.", bytes)
-            }
+            HgPathError::DecodeError(bytes) => write!(
+                f,
+                "Invalid HgPath '{:?}': could not be decoded.",
+                bytes
+            ),
             HgPathError::EndsWithSlash(path) => {
-                format!("Audit failed for '{}': ends with a slash.", path)
+                write!(f, "Audit failed for '{}': ends with a slash.", path)
             }
-            HgPathError::ContainsIllegalComponent(path) => format!(
+            HgPathError::ContainsIllegalComponent(path) => write!(
+                f,
                 "Audit failed for '{}': contains an illegal component.",
                 path
             ),
-            HgPathError::InsideDotHg(path) => format!(
+            HgPathError::InsideDotHg(path) => write!(
+                f,
                 "Audit failed for '{}': is inside the '.hg' folder.",
                 path
             ),
             HgPathError::IsInsideNestedRepo {
                 path,
                 nested_repo: nested,
-            } => format!(
+            } => {
+                write!(f,
                 "Audit failed for '{}': is inside a nested repository '{}'.",
                 path, nested
-            ),
-            HgPathError::TraversesSymbolicLink { path, symlink } => format!(
+            )
+            }
+            HgPathError::TraversesSymbolicLink { path, symlink } => write!(
+                f,
                 "Audit failed for '{}': traverses symbolic link '{}'.",
                 path, symlink
             ),
-            HgPathError::NotFsCompliant(path) => format!(
+            HgPathError::NotFsCompliant(path) => write!(
+                f,
                 "Audit failed for '{}': cannot be turned into a \
                  filesystem path.",
                 path
             ),
-            HgPathError::NotUnderRoot { path, root } => format!(
+            HgPathError::NotUnderRoot { path, root } => write!(
+                f,
                 "Audit failed for '{}': not under root {}.",
                 path.display(),
                 root.display()
@@ -367,7 +378,9 @@
     }
 }
 
-#[derive(Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
+#[derive(
+    Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash, derive_more::From,
+)]
 pub struct HgPathBuf {
     inner: Vec<u8>,
 }
@@ -408,12 +421,6 @@
     }
 }
 
-impl From<Vec<u8>> for HgPathBuf {
-    fn from(vec: Vec<u8>) -> Self {
-        Self { inner: vec }
-    }
-}
-
 impl<T: ?Sized + AsRef<HgPath>> From<&T> for HgPathBuf {
     fn from(s: &T) -> HgPathBuf {
         s.as_ref().to_owned()
--- a/rust/hg-cpython/Cargo.toml	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/Cargo.toml	Thu Mar 18 18:24:59 2021 -0400
@@ -22,6 +22,7 @@
 python3-bin = ["cpython/python3-sys"]
 
 [dependencies]
+crossbeam-channel = "0.4"
 hg-core = { path = "../hg-core"}
 libc = '*'
 log = "0.4.8"
--- a/rust/hg-cpython/src/copy_tracing.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/copy_tracing.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,7 +1,7 @@
 use cpython::ObjectProtocol;
-use cpython::PyBool;
 use cpython::PyBytes;
 use cpython::PyDict;
+use cpython::PyDrop;
 use cpython::PyList;
 use cpython::PyModule;
 use cpython::PyObject;
@@ -9,13 +9,63 @@
 use cpython::PyTuple;
 use cpython::Python;
 
-use hg::copy_tracing::combine_changeset_copies;
 use hg::copy_tracing::ChangedFiles;
-use hg::copy_tracing::DataHolder;
-use hg::copy_tracing::RevInfo;
-use hg::copy_tracing::RevInfoMaker;
+use hg::copy_tracing::CombineChangesetCopies;
 use hg::Revision;
 
+use self::pybytes_with_data::PyBytesWithData;
+
+// Module to encapsulate private fields
+mod pybytes_with_data {
+    use cpython::{PyBytes, Python};
+
+    /// Safe abstraction over a `PyBytes` together with the `&[u8]` slice
+    /// that borrows it.
+    ///
+    /// Calling `PyBytes::data` requires a GIL marker but we want to access the
+    /// data in a thread that (ideally) does not need to acquire the GIL.
+    /// This type allows separating the call and the use.
+    pub(super) struct PyBytesWithData {
+        #[allow(unused)]
+        keep_alive: PyBytes,
+
+        /// Borrows the buffer inside `self.keep_alive`,
+        /// but the borrow-checker cannot express self-referential structs.
+        data: *const [u8],
+    }
+
+    fn require_send<T: Send>() {}
+
+    #[allow(unused)]
+    fn static_assert_pybytes_is_send() {
+        require_send::<PyBytes>();
+    }
+
+    // Safety: PyBytes is Send. Raw pointers are not by default,
+    // but here sending one to another thread is fine since we ensure it stays
+    // valid.
+    unsafe impl Send for PyBytesWithData {}
+
+    impl PyBytesWithData {
+        pub fn new(py: Python, bytes: PyBytes) -> Self {
+            Self {
+                data: bytes.data(py),
+                keep_alive: bytes,
+            }
+        }
+
+        pub fn data(&self) -> &[u8] {
+            // Safety: the raw pointer is valid as long as the PyBytes is still
+            // alive, and the returned slice borrows `self`.
+            unsafe { &*self.data }
+        }
+
+        pub fn unwrap(self) -> PyBytes {
+            self.keep_alive
+        }
+    }
+}
+
 /// Combines copies information contained into revision `revs` to build a copy
 /// map.
 ///
@@ -26,88 +76,135 @@
     children_count: PyDict,
     target_rev: Revision,
     rev_info: PyObject,
-    is_ancestor: PyObject,
+    multi_thread: bool,
 ) -> PyResult<PyDict> {
-    let revs: PyResult<_> =
-        revs.iter(py).map(|r| Ok(r.extract(py)?)).collect();
-
-    // Wrap the `is_ancestor` python callback as a Rust closure
-    //
-    // No errors are expected from the Python side, and they will should only
-    // happens in case of programing error or severe data corruption. Such
-    // errors will raise panic and the rust-cpython harness will turn them into
-    // Python exception.
-    let is_ancestor_wrap = |anc: Revision, desc: Revision| -> bool {
-        is_ancestor
-            .call(py, (anc, desc), None)
-            .expect(
-                "rust-copy-tracing: python call  to `is_ancestor` \
-                failed",
-            )
-            .cast_into::<PyBool>(py)
-            .expect(
-                "rust-copy-tracing: python call  to `is_ancestor` \
-                returned unexpected non-Bool value",
-            )
-            .is_true()
-    };
-
-    // Wrap the `rev_info_maker` python callback as a Rust closure
-    //
-    // No errors are expected from the Python side, and they will should only
-    // happens in case of programing error or severe data corruption. Such
-    // errors will raise panic and the rust-cpython harness will turn them into
-    // Python exception.
-    let rev_info_maker: RevInfoMaker<PyBytes> =
-        Box::new(|rev: Revision, d: &mut DataHolder<PyBytes>| -> RevInfo {
-            let res: PyTuple = rev_info
-                .call(py, (rev,), None)
-                .expect("rust-copy-tracing: python call to `rev_info` failed")
-                .cast_into(py)
-                .expect(
-                    "rust-copy_tracing: python call to `rev_info` returned \
-                    unexpected non-Tuple value",
-                );
-            let p1 = res.get_item(py, 0).extract(py).expect(
-                "rust-copy-tracing: rev_info return is invalid, first item \
-                is a not a revision",
-            );
-            let p2 = res.get_item(py, 1).extract(py).expect(
-                "rust-copy-tracing: rev_info return is invalid, first item \
-                is a not a revision",
-            );
-
-            let files = match res.get_item(py, 2).extract::<PyBytes>(py) {
-                Ok(raw) => {
-                    // Give responsability for the raw bytes lifetime to
-                    // hg-core
-                    d.data = Some(raw);
-                    let addrs = d.data.as_ref().expect(
-                        "rust-copy-tracing: failed to get a reference to the \
-                        raw bytes for copy data").data(py);
-                    ChangedFiles::new(addrs)
-                }
-                // value was presumably None, meaning they was no copy data.
-                Err(_) => ChangedFiles::new_empty(),
-            };
-
-            (p1, p2, files)
-        });
-    let children_count: PyResult<_> = children_count
+    let children_count = children_count
         .items(py)
         .iter()
         .map(|(k, v)| Ok((k.extract(py)?, v.extract(py)?)))
-        .collect();
+        .collect::<PyResult<_>>()?;
+
+    /// (Revision number, parent 1, parent 2, copy data for this revision)
+    type RevInfo<Bytes> = (Revision, Revision, Revision, Option<Bytes>);
+
+    let revs_info =
+        revs.iter(py).map(|rev_py| -> PyResult<RevInfo<PyBytes>> {
+            let rev = rev_py.extract(py)?;
+            let tuple: PyTuple =
+                rev_info.call(py, (rev_py,), None)?.cast_into(py)?;
+            let p1 = tuple.get_item(py, 0).extract(py)?;
+            let p2 = tuple.get_item(py, 1).extract(py)?;
+            let opt_bytes = tuple.get_item(py, 2).extract(py)?;
+            Ok((rev, p1, p2, opt_bytes))
+        });
+
+    let path_copies;
+    if !multi_thread {
+        let mut combine_changeset_copies =
+            CombineChangesetCopies::new(children_count);
+
+        for rev_info in revs_info {
+            let (rev, p1, p2, opt_bytes) = rev_info?;
+            let files = match &opt_bytes {
+                Some(bytes) => ChangedFiles::new(bytes.data(py)),
+                // Python None was extracted to Option::None,
+                // meaning there was no copy data.
+                None => ChangedFiles::new_empty(),
+            };
+
+            combine_changeset_copies.add_revision(rev, p1, p2, files)
+        }
+        path_copies = combine_changeset_copies.finish(target_rev)
+    } else {
+        // Use a bounded channel to provide back-pressure:
+        // if the child thread is slower to process revisions than this thread
+        // is to gather data for them, an unbounded channel would keep
+        // growing and eat memory.
+        //
+        // TODO: tweak the bound?
+        let (rev_info_sender, rev_info_receiver) =
+            crossbeam_channel::bounded::<RevInfo<PyBytesWithData>>(1000);
+
+        // This channel (going the other way around) however is unbounded.
+        // If they were both bounded, there might potentially be deadlocks
+        // where both channels are full and both threads are waiting on each
+        // other.
+        let (pybytes_sender, pybytes_receiver) =
+            crossbeam_channel::unbounded();
 
-    let res = combine_changeset_copies(
-        revs?,
-        children_count?,
-        target_rev,
-        rev_info_maker,
-        &is_ancestor_wrap,
-    );
+        // Start a thread that does CPU-heavy processing in parallel with the
+        // loop below.
+        //
+        // If the parent thread panics, `rev_info_sender` will be dropped and
+        // “disconnected”. `rev_info_receiver` will be notified of this and
+        // exit its own loop.
+        let thread = std::thread::spawn(move || {
+            let mut combine_changeset_copies =
+                CombineChangesetCopies::new(children_count);
+            for (rev, p1, p2, opt_bytes) in rev_info_receiver {
+                let files = match &opt_bytes {
+                    Some(raw) => ChangedFiles::new(raw.data()),
+                    // Python None was extracted to Option::None,
+                    // meaning there was no copy data.
+                    None => ChangedFiles::new_empty(),
+                };
+                combine_changeset_copies.add_revision(rev, p1, p2, files);
+
+                // Send `PyBytes` back to the parent thread so the parent
+                // thread can drop it. Otherwise the GIL would be implicitly
+                // acquired here through `impl Drop for PyBytes`.
+                if let Some(bytes) = opt_bytes {
+                    if pybytes_sender.send(bytes.unwrap()).is_err() {
+                        // The channel is disconnected, meaning the parent
+                        // thread panicked or returned early through `?` to
+                        // propagate a Python exception.
+                        break;
+                    }
+                }
+            }
+
+            combine_changeset_copies.finish(target_rev)
+        });
+
+        for rev_info in revs_info {
+            let (rev, p1, p2, opt_bytes) = rev_info?;
+            let opt_bytes = opt_bytes.map(|b| PyBytesWithData::new(py, b));
+
+            // We’d prefer to avoid the child thread calling into Python code,
+            // but this avoids a potential deadlock on the GIL if it does:
+            py.allow_threads(|| {
+                rev_info_sender.send((rev, p1, p2, opt_bytes)).expect(
+                    "combine_changeset_copies: channel is disconnected",
+                );
+            });
+
+            // Drop anything in the channel, without blocking
+            for pybytes in pybytes_receiver.try_iter() {
+                pybytes.release_ref(py)
+            }
+        }
+        // We’d prefer to avoid the child thread calling into Python code,
+        // but this avoids a potential deadlock on the GIL if it does:
+        path_copies = py.allow_threads(|| {
+            // Disconnect the channel to signal the child thread to stop:
+            // the `for … in rev_info_receiver` loop will end.
+            drop(rev_info_sender);
+
+            // Wait for the child thread to stop, and propagate any panic.
+            thread.join().unwrap_or_else(|panic_payload| {
+                std::panic::resume_unwind(panic_payload)
+            })
+        });
+
+        // Drop anything left in the channel
+        for pybytes in pybytes_receiver.iter() {
+            pybytes.release_ref(py)
+        }
+    };
+
     let out = PyDict::new(py);
-    for (dest, source) in res.into_iter() {
+    for (dest, source) in path_copies.into_iter() {
         out.set_item(
             py,
             PyBytes::new(py, &dest.into_vec()),
@@ -135,7 +232,7 @@
                 children: PyDict,
                 target_rev: Revision,
                 rev_info: PyObject,
-                is_ancestor: PyObject
+                multi_thread: bool
             )
         ),
     )?;
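
The thread-plus-bounded-channel structure above is independent of the
Python details: a bounded channel applies back-pressure to the producer,
dropping the sender disconnects the channel and ends the consumer's loop,
and `join` propagates panics. A minimal sketch of that skeleton with plain
integers as work items (the unbounded return channel for `PyBytes` is
omitted, since it only matters for GIL-bound destructors):

use crossbeam_channel::bounded;

fn main() {
    // Bounded, so a slow worker applies back-pressure to this thread
    // instead of letting the queue grow without limit.
    let (work_sender, work_receiver) = bounded::<u32>(1000);

    let worker = std::thread::spawn(move || {
        let mut sum: u64 = 0;
        // Ends when `work_sender` is dropped and the channel disconnects.
        for item in work_receiver {
            sum += u64::from(item);
        }
        sum
    });

    for item in 0..10_000u32 {
        work_sender
            .send(item)
            .expect("worker disconnected unexpectedly");
    }

    // Disconnect the channel so the worker's loop ends, then join,
    // propagating any panic from the worker thread.
    drop(work_sender);
    let total = worker.join().unwrap_or_else(|panic_payload| {
        std::panic::resume_unwind(panic_payload)
    });
    assert_eq!(total, (0..10_000u64).sum::<u64>());
}
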
--- a/rust/hg-cpython/src/dirstate.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/dirstate.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -24,10 +24,7 @@
     exc, PyBytes, PyDict, PyErr, PyList, PyModule, PyObject, PyResult,
     PySequence, Python,
 };
-use hg::{
-    utils::hg_path::HgPathBuf, DirstateEntry, DirstateParseError, EntryState,
-    StateMap,
-};
+use hg::{utils::hg_path::HgPathBuf, DirstateEntry, EntryState, StateMap};
 use libc::{c_char, c_int};
 use std::convert::TryFrom;
 
@@ -79,11 +76,10 @@
         .map(|(filename, stats)| {
             let stats = stats.extract::<PySequence>(py)?;
             let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?;
-            let state = EntryState::try_from(state.data(py)[0]).map_err(
-                |e: DirstateParseError| {
+            let state =
+                EntryState::try_from(state.data(py)[0]).map_err(|e| {
                     PyErr::new::<exc::ValueError, _>(py, e.to_string())
-                },
-            )?;
+                })?;
             let mode = stats.get_item(py, 1)?.extract(py)?;
             let size = stats.get_item(py, 2)?.extract(py)?;
             let mtime = stats.get_item(py, 3)?.extract(py)?;
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -18,9 +18,9 @@
 
 use crate::dirstate::extract_dirstate;
 use hg::{
+    errors::HgError,
     utils::hg_path::{HgPath, HgPathBuf},
-    DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError,
-    EntryState,
+    DirsMultiset, DirsMultisetIter, DirstateMapError, EntryState,
 };
 
 py_class!(pub class Dirs |py| {
@@ -38,7 +38,7 @@
             skip_state = Some(
                 skip.extract::<PyBytes>(py)?.data(py)[0]
                     .try_into()
-                    .map_err(|e: DirstateParseError| {
+                    .map_err(|e: HgError| {
                         PyErr::new::<exc::ValueError, _>(py, e.to_string())
                     })?,
             );
@@ -46,7 +46,7 @@
         let inner = if let Ok(map) = map.cast_as::<PyDict>(py) {
             let dirstate = extract_dirstate(py, &map)?;
             DirsMultiset::from_dirstate(&dirstate, skip_state)
-                .map_err(|e| {
+                .map_err(|e: DirstateMapError| {
                     PyErr::new::<exc::ValueError, _>(py, e.to_string())
                 })?
         } else {
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -24,12 +24,14 @@
         NonNormalEntries, NonNormalEntriesIterator,
     },
     dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
+    parsers::dirstate_parents_to_pytuple,
 };
 use hg::{
+    errors::HgError,
+    revlog::Node,
     utils::hg_path::{HgPath, HgPathBuf},
     DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
-    DirstateMapError, DirstateParents, DirstateParseError, EntryState,
-    StateMapIter, PARENT_SIZE,
+    DirstateMapError, DirstateParents, EntryState, StateMapIter,
 };
 
 // TODO
@@ -84,13 +86,13 @@
             HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
             oldstate.extract::<PyBytes>(py)?.data(py)[0]
                 .try_into()
-                .map_err(|e: DirstateParseError| {
+                .map_err(|e: HgError| {
                     PyErr::new::<exc::ValueError, _>(py, e.to_string())
                 })?,
             DirstateEntry {
                 state: state.extract::<PyBytes>(py)?.data(py)[0]
                     .try_into()
-                    .map_err(|e: DirstateParseError| {
+                    .map_err(|e: HgError| {
                         PyErr::new::<exc::ValueError, _>(py, e.to_string())
                     })?,
                 mode: mode.extract(py)?,
@@ -113,7 +115,7 @@
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
                     .try_into()
-                    .map_err(|e: DirstateParseError| {
+                    .map_err(|e: HgError| {
                         PyErr::new::<exc::ValueError, _>(py, e.to_string())
                     })?,
                 size.extract(py)?,
@@ -137,7 +139,7 @@
                 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                 oldstate.extract::<PyBytes>(py)?.data(py)[0]
                     .try_into()
-                    .map_err(|e: DirstateParseError| {
+                    .map_err(|e: HgError| {
                         PyErr::new::<exc::ValueError, _>(py, e.to_string())
                     })?,
             )
@@ -285,10 +287,7 @@
     def parents(&self, st: PyObject) -> PyResult<PyTuple> {
         self.inner(py).borrow_mut()
             .parents(st.extract::<PyBytes>(py)?.data(py))
-            .and_then(|d| {
-                Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2))
-                    .to_py_object(py))
-            })
+            .map(|parents| dirstate_parents_to_pytuple(py, parents))
             .or_else(|_| {
                 Err(PyErr::new::<exc::OSError, _>(
                     py,
@@ -311,9 +310,8 @@
             .read(st.extract::<PyBytes>(py)?.data(py))
         {
             Ok(Some(parents)) => Ok(Some(
-                (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
-                    .to_py_object(py)
-                    .into_object(),
+                dirstate_parents_to_pytuple(py, parents)
+                    .into_object()
             )),
             Ok(None) => Ok(Some(py.None())),
             Err(_) => Err(PyErr::new::<exc::OSError, _>(
@@ -601,7 +599,7 @@
     Option<(PyBytes, PyObject)>
 );
 
-fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<[u8; PARENT_SIZE]> {
+fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
     let bytes = obj.extract::<PyBytes>(py)?;
     match bytes.data(py).try_into() {
         Ok(s) => Ok(s),
--- a/rust/hg-cpython/src/parsers.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/parsers.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -15,8 +15,7 @@
 };
 use hg::{
     pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
-    DirstatePackError, DirstateParents, DirstateParseError, FastHashMap,
-    PARENT_SIZE,
+    DirstateParents, FastHashMap, PARENT_SIZE,
 };
 use std::convert::TryInto;
 
@@ -54,26 +53,9 @@
                     PyBytes::new(py, copy_path.as_bytes()),
                 )?;
             }
-            Ok(
-                (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2))
-                    .to_py_object(py),
-            )
+            Ok(dirstate_parents_to_pytuple(py, parents))
         }
-        Err(e) => Err(PyErr::new::<exc::ValueError, _>(
-            py,
-            match e {
-                DirstateParseError::TooLittleData => {
-                    "too little data for parents".to_string()
-                }
-                DirstateParseError::Overflow => {
-                    "overflow in dirstate".to_string()
-                }
-                DirstateParseError::CorruptedEntry(e) => e,
-                DirstateParseError::Damaged => {
-                    "dirstate appears to be damaged".to_string()
-                }
-            },
-        )),
+        Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
     }
 }
 
@@ -128,18 +110,9 @@
             }
             Ok(PyBytes::new(py, &packed))
         }
-        Err(error) => Err(PyErr::new::<exc::ValueError, _>(
-            py,
-            match error {
-                DirstatePackError::CorruptedParent => {
-                    "expected a 20-byte hash".to_string()
-                }
-                DirstatePackError::CorruptedEntry(e) => e,
-                DirstatePackError::BadSize(expected, actual) => {
-                    format!("bad dirstate size: {} != {}", actual, expected)
-                }
-            },
-        )),
+        Err(error) => {
+            Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
+        }
     }
 }
 
@@ -179,3 +152,12 @@
 
     Ok(m)
 }
+
+pub(crate) fn dirstate_parents_to_pytuple(
+    py: Python,
+    parents: &DirstateParents,
+) -> PyTuple {
+    let p1 = PyBytes::new(py, parents.p1.as_bytes());
+    let p2 = PyBytes::new(py, parents.p2.as_bytes());
+    (p1, p2).to_py_object(py)
+}
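+
+// Note: the hand-rolled error-message `match` arms removed above were
+// replaced by `e.to_string()`, which defers to the `Display` impls of the
+// hg-core error types. A self-contained sketch of that pattern (hypothetical
+// enum, for illustration only, not part of this change):
+#[cfg(test)]
+mod display_error_sketch {
+    use std::fmt;
+
+    enum ParseError {
+        TooLittleData,
+        CorruptedEntry(String),
+    }
+
+    impl fmt::Display for ParseError {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            match self {
+                ParseError::TooLittleData => {
+                    write!(f, "too little data for parents")
+                }
+                ParseError::CorruptedEntry(e) => write!(f, "{}", e),
+            }
+        }
+    }
+
+    #[test]
+    fn to_string_uses_display() {
+        assert_eq!(
+            ParseError::TooLittleData.to_string(),
+            "too little data for parents"
+        );
+    }
+}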
--- a/rust/hg-cpython/src/revlog.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/hg-cpython/src/revlog.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -17,8 +17,8 @@
 };
 use hg::{
     nodemap::{Block, NodeMapError, NodeTree},
-    revlog::{nodemap::NodeMap, RevlogIndex},
-    NodeError, Revision,
+    revlog::{nodemap::NodeMap, NodePrefix, RevlogIndex},
+    Revision,
 };
 use std::cell::RefCell;
 
@@ -64,7 +64,7 @@
         let nt = opt.as_ref().unwrap();
         let idx = &*self.cindex(py).borrow();
         let node = node_from_py_bytes(py, &node)?;
-        nt.find_bin(idx, (&node).into()).map_err(|e| nodemap_error(py, e))
+        nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))
     }
 
     /// same as `get_rev()` but raises a bare `error.RevlogError` if node
@@ -107,7 +107,9 @@
             String::from_utf8_lossy(node.data(py)).to_string()
         };
 
-        nt.find_hex(idx, &node_as_string)
+        let prefix = NodePrefix::from_hex(&node_as_string)
+            .map_err(|_| {
+                PyErr::new::<ValueError, _>(py, "Invalid node or prefix")
+            })?;
+
+        nt.find_bin(idx, prefix)
             // TODO make an inner API returning the node directly
             .map(|opt| opt.map(
                 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
@@ -468,17 +470,9 @@
     match err {
         NodeMapError::MultipleResults => revlog_error(py),
         NodeMapError::RevisionNotInIndex(r) => rev_not_in_index(py, r),
-        NodeMapError::InvalidNodePrefix(s) => invalid_node_prefix(py, &s),
     }
 }
 
-fn invalid_node_prefix(py: Python, ne: &NodeError) -> PyErr {
-    PyErr::new::<ValueError, _>(
-        py,
-        format!("Invalid node or prefix: {:?}", ne),
-    )
-}
-
 /// Create the module, with __package__ given from parent
 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
     let dotted_name = &format!("{}.revlog", package);
--- a/rust/rhg/Cargo.toml	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/Cargo.toml	Thu Mar 18 18:24:59 2021 -0400
@@ -9,8 +9,13 @@
 
 [dependencies]
 hg-core = { path = "../hg-core"}
+chrono = "0.4.19"
 clap = "2.33.1"
+derive_more = "0.99"
+lazy_static = "1.4.0"
 log = "0.4.11"
 micro-timer = "0.3.1"
+regex = "1.3.9"
 env_logger = "0.7.1"
-format-bytes = "0.1.3"
+format-bytes = "0.2.1"
+users = "0.11.0"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/blackbox.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,163 @@
+//! Logging for repository events, including commands run in the repository.
+
+use crate::CliInvocation;
+use format_bytes::format_bytes;
+use hg::errors::HgError;
+use hg::repo::Repo;
+use hg::utils::{files::get_bytes_from_os_str, shell_quote};
+
+const ONE_MEBIBYTE: u64 = 1 << 20;
+
+// TODO: somehow keep defaults in sync with `configitem` in `hgext/blackbox.py`
+const DEFAULT_MAX_SIZE: u64 = ONE_MEBIBYTE;
+const DEFAULT_MAX_FILES: u32 = 7;
+
+// Python does not support %.3f, only %f
+const DEFAULT_DATE_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f";
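+
+// Illustrative `.hg/hgrc` snippet overriding these defaults (the option
+// names match the blackbox extension read below; the values are made up):
+//
+//     [extensions]
+//     blackbox =
+//
+//     [blackbox]
+//     maxsize = 2 MB
+//     maxfiles = 10
+//     date-format = %Y-%m-%d %H:%M:%S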
+
+type DateTime = chrono::DateTime<chrono::Local>;
+
+pub struct ProcessStartTime {
+    /// For measuring duration
+    monotonic_clock: std::time::Instant,
+    /// For formatting with year, month, day, etc.
+    calendar_based: DateTime,
+}
+
+impl ProcessStartTime {
+    pub fn now() -> Self {
+        Self {
+            monotonic_clock: std::time::Instant::now(),
+            calendar_based: chrono::Local::now(),
+        }
+    }
+}
+
+pub struct Blackbox<'a> {
+    process_start_time: &'a ProcessStartTime,
+    /// Do nothing if this is `None`
+    configured: Option<ConfiguredBlackbox<'a>>,
+}
+
+struct ConfiguredBlackbox<'a> {
+    repo: &'a Repo,
+    max_size: u64,
+    max_files: u32,
+    date_format: &'a str,
+}
+
+impl<'a> Blackbox<'a> {
+    pub fn new(
+        invocation: &'a CliInvocation<'a>,
+        process_start_time: &'a ProcessStartTime,
+    ) -> Result<Self, HgError> {
+        let configured = if let Ok(repo) = invocation.repo {
+            if invocation.config.get(b"extensions", b"blackbox").is_none() {
+                // The extension is not enabled
+                None
+            } else {
+                Some(ConfiguredBlackbox {
+                    repo,
+                    max_size: invocation
+                        .config
+                        .get_byte_size(b"blackbox", b"maxsize")?
+                        .unwrap_or(DEFAULT_MAX_SIZE),
+                    max_files: invocation
+                        .config
+                        .get_u32(b"blackbox", b"maxfiles")?
+                        .unwrap_or(DEFAULT_MAX_FILES),
+                    date_format: invocation
+                        .config
+                        .get_str(b"blackbox", b"date-format")?
+                        .unwrap_or(DEFAULT_DATE_FORMAT),
+                })
+            }
+        } else {
+            // Without a local repository there’s no `.hg/blackbox.log` to
+            // write to.
+            None
+        };
+        Ok(Self {
+            process_start_time,
+            configured,
+        })
+    }
+
+    pub fn log_command_start(&self) {
+        if let Some(configured) = &self.configured {
+            let message = format_bytes!(b"(rust) {}", format_cli_args());
+            configured.log(&self.process_start_time.calendar_based, &message);
+        }
+    }
+
+    pub fn log_command_end(&self, exit_code: i32) {
+        if let Some(configured) = &self.configured {
+            let now = chrono::Local::now();
+            let duration = self
+                .process_start_time
+                .monotonic_clock
+                .elapsed()
+                .as_secs_f64();
+            let message = format_bytes!(
+                b"(rust) {} exited {} after {} seconds",
+                format_cli_args(),
+                exit_code,
+                format_bytes::Utf8(format_args!("{:.03}", duration))
+            );
+            configured.log(&now, &message);
+        }
+    }
+}
+
+impl ConfiguredBlackbox<'_> {
+    fn log(&self, date_time: &DateTime, message: &[u8]) {
+        let date = format_bytes::Utf8(date_time.format(self.date_format));
+        let user = users::get_current_username().map(get_bytes_from_os_str);
+        let user = user.as_deref().unwrap_or(b"???");
+        let rev = format_bytes::Utf8(match self.repo.dirstate_parents() {
+            Ok(parents) if parents.p2 == hg::revlog::node::NULL_NODE => {
+                format!("{:x}", parents.p1)
+            }
+            Ok(parents) => format!("{:x}+{:x}", parents.p1, parents.p2),
+            Err(_dirstate_corruption_error) => {
+                // TODO: log a non-fatal warning to stderr
+                "???".to_owned()
+            }
+        });
+        let pid = std::process::id();
+        let line = format_bytes!(
+            b"{} {} @{} ({})> {}\n",
+            date,
+            user,
+            rev,
+            pid,
+            message
+        );
+        let result =
+            hg::logging::LogFile::new(self.repo.hg_vfs(), "blackbox.log")
+                .max_size(Some(self.max_size))
+                .max_files(self.max_files)
+                .write(&line);
+        match result {
+            Ok(()) => {}
+            Err(_io_error) => {
+                // TODO: log a non-fatal warning to stderr
+            }
+        }
+    }
+}
+
+fn format_cli_args() -> Vec<u8> {
+    let mut args = std::env::args_os();
+    // Skip the first (or zeroth) arg, the name of the `rhg` executable.
+    let _ = args.next();
+    let mut args = args.map(|arg| shell_quote(&get_bytes_from_os_str(arg)));
+    let mut formatted = Vec::new();
+    if let Some(arg) = args.next() {
+        formatted.extend(arg)
+    }
+    for arg in args {
+        formatted.push(b' ');
+        formatted.extend(arg)
+    }
+    formatted
+}
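+
+// An example of the resulting line in `.hg/blackbox.log` (illustrative
+// values; the revision is the full hex of the dirstate parent):
+//
+//     2021/03/18 18:24:59.123 alice @d3adbeef... (12345)> (rust) files
+//
+// A sketch of the space-joining behaviour of `format_cli_args`, extracted
+// into a pure helper for testability (`join_with_spaces` is hypothetical
+// and not part of this change):
+#[cfg(test)]
+mod tests {
+    fn join_with_spaces(args: &[&[u8]]) -> Vec<u8> {
+        let mut formatted = Vec::new();
+        for (i, arg) in args.iter().enumerate() {
+            if i > 0 {
+                formatted.push(b' ');
+            }
+            formatted.extend_from_slice(arg);
+        }
+        formatted
+    }
+
+    #[test]
+    fn joins_arguments_with_single_spaces() {
+        let args: &[&[u8]] = &[b"files", b"-r", b"0"];
+        assert_eq!(join_with_spaces(args), b"files -r 0".to_vec());
+    }
+}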
--- a/rust/rhg/src/commands.rs	Sat Mar 13 02:09:23 2021 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-pub mod cat;
-pub mod debugdata;
-pub mod debugrequirements;
-pub mod files;
-pub mod root;
-use crate::error::CommandError;
-use crate::ui::Ui;
-
-/// The common trait for rhg commands
-///
-/// Normalize the interface of the commands provided by rhg
-pub trait Command {
-    fn run(&self, ui: &Ui) -> Result<(), CommandError>;
-}
--- a/rust/rhg/src/commands/cat.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/commands/cat.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,9 +1,7 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
-use crate::ui::Ui;
-use hg::operations::{cat, CatRevError, CatRevErrorKind};
-use hg::repo::Repo;
+use crate::error::CommandError;
+use clap::Arg;
+use format_bytes::format_bytes;
+use hg::operations::cat;
 use hg::utils::hg_path::HgPathBuf;
 use micro_timer::timed;
 use std::convert::TryFrom;
@@ -12,94 +10,75 @@
 Output the current or given revision of files
 ";
 
-pub struct CatCommand<'a> {
-    rev: Option<&'a str>,
-    files: Vec<&'a str>,
-}
-
-impl<'a> CatCommand<'a> {
-    pub fn new(rev: Option<&'a str>, files: Vec<&'a str>) -> Self {
-        Self { rev, files }
-    }
-
-    fn display(&self, ui: &Ui, data: &[u8]) -> Result<(), CommandError> {
-        ui.write_stdout(data)?;
-        Ok(())
-    }
-}
-
-impl<'a> Command for CatCommand<'a> {
-    #[timed]
-    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
-        let repo = Repo::find()?;
-        repo.check_requirements()?;
-        let cwd = std::env::current_dir()
-            .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
-
-        let mut files = vec![];
-        for file in self.files.iter() {
-            let normalized = cwd.join(&file);
-            let stripped = normalized
-                .strip_prefix(&repo.working_directory_path())
-                .or(Err(CommandErrorKind::Abort(None)))?;
-            let hg_file = HgPathBuf::try_from(stripped.to_path_buf())
-                .or(Err(CommandErrorKind::Abort(None)))?;
-            files.push(hg_file);
-        }
-
-        match self.rev {
-            Some(rev) => {
-                let data = cat(&repo, rev, &files)
-                    .map_err(|e| map_rev_error(rev, e))?;
-                self.display(ui, &data)
-            }
-            None => Err(CommandErrorKind::Unimplemented.into()),
-        }
-    }
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("cat")
+        .arg(
+            Arg::with_name("rev")
+                .help("search the repository as it is in REV")
+                .short("-r")
+                .long("--revision")
+                .value_name("REV")
+                .takes_value(true),
+        )
+        .arg(
+            clap::Arg::with_name("files")
+                .required(true)
+                .multiple(true)
+                .empty_values(false)
+                .value_name("FILE")
+                .help("Activity to start: activity@category"),
+        )
+        .about(HELP_TEXT)
 }
 
-/// Convert `CatRevErrorKind` to `CommandError`
-fn map_rev_error(rev: &str, err: CatRevError) -> CommandError {
-    CommandError {
-        kind: match err.kind {
-            CatRevErrorKind::IoError(err) => CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!("abort: {}\n", err)).into(),
-            )),
-            CatRevErrorKind::InvalidRevision => CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: invalid revision identifier {}\n",
-                    rev
-                ))
-                .into(),
-            )),
-            CatRevErrorKind::AmbiguousPrefix => CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: ambiguous revision identifier {}\n",
-                    rev
-                ))
-                .into(),
-            )),
-            CatRevErrorKind::UnsuportedRevlogVersion(version) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: unsupported revlog version {}\n",
-                        version
-                    ))
-                    .into(),
-                ))
+#[timed]
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let rev = invocation.subcommand_args.value_of("rev");
+    let file_args = match invocation.subcommand_args.values_of("files") {
+        Some(files) => files.collect(),
+        None => vec![],
+    };
+
+    let repo = invocation.repo?;
+    let cwd = hg::utils::current_dir()?;
+    let working_directory = repo.working_directory_path();
+    let working_directory = cwd.join(working_directory); // Make it absolute
+
+    let mut files = vec![];
+    for file in file_args.iter() {
+        // TODO: actually normalize `..` path segments etc?
+        let normalized = cwd.join(&file);
+        let stripped = normalized
+            .strip_prefix(&working_directory)
+            // TODO: error message for path arguments outside of the repo
+            .map_err(|_| CommandError::abort(""))?;
+        let hg_file = HgPathBuf::try_from(stripped.to_path_buf())
+            .map_err(|e| CommandError::abort(e.to_string()))?;
+        files.push(hg_file);
+    }
+
+    match rev {
+        Some(rev) => {
+            let output = cat(&repo, rev, &files).map_err(|e| (e, rev))?;
+            invocation.ui.write_stdout(&output.concatenated)?;
+            if !output.missing.is_empty() {
+                let short = format!("{:x}", output.node.short()).into_bytes();
+                for path in &output.missing {
+                    invocation.ui.write_stderr(&format_bytes!(
+                        b"{}: no such file in rev {}\n",
+                        path.as_bytes(),
+                        short
+                    ))?;
+                }
             }
-            CatRevErrorKind::CorruptedRevlog => CommandErrorKind::Abort(Some(
-                "abort: corrupted revlog\n".into(),
-            )),
-            CatRevErrorKind::UnknowRevlogDataFormat(format) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: unknow revlog dataformat {:?}\n",
-                        format
-                    ))
-                    .into(),
-                ))
+            if output.found_any {
+                Ok(())
+            } else {
+                Err(CommandError::Unsuccessful)
             }
-        },
+        }
+        None => Err(CommandError::unsupported(
+            "`rhg cat` without `--rev` / `-r`",
+        )),
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/config.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,38 @@
+use crate::error::CommandError;
+use clap::Arg;
+use format_bytes::format_bytes;
+use hg::errors::HgError;
+use hg::utils::SliceExt;
+
+pub const HELP_TEXT: &str = "
+With one argument of the form section.name, print just the value of that config item.
+";
+
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("config")
+        .arg(
+            Arg::with_name("name")
+                .help("the section.name to print")
+                .value_name("NAME")
+                .required(true)
+                .takes_value(true),
+        )
+        .about(HELP_TEXT)
+}
+
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let (section, name) = invocation
+        .subcommand_args
+        .value_of("name")
+        .expect("missing required CLI argument")
+        .as_bytes()
+        .split_2(b'.')
+        .ok_or_else(|| HgError::unsupported("hg config <section>"))?;
+
+    if let Some(value) = invocation.config.get(section, name) {
+        invocation.ui.write_stdout(&format_bytes!(b"{}\n", value))?;
+        Ok(())
+    } else {
+        Err(CommandError::Unsuccessful)
+    }
+}
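+
+// Example usage (illustrative; the printed value depends on the local
+// configuration):
+//
+//     $ rhg config ui.username
+//     Alice <alice@example.com>
+//
+// A NAME without a `.` separator (plain section listing, as in
+// `hg config ui`) is reported as unsupported rather than implemented here.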
--- a/rust/rhg/src/commands/debugdata.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/commands/debugdata.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,91 +1,65 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
-use crate::ui::Ui;
-use hg::operations::{
-    debug_data, DebugDataError, DebugDataErrorKind, DebugDataKind,
-};
-use hg::repo::Repo;
+use crate::error::CommandError;
+use clap::Arg;
+use clap::ArgGroup;
+use hg::operations::{debug_data, DebugDataKind};
 use micro_timer::timed;
 
 pub const HELP_TEXT: &str = "
 Dump the contents of a data file revision
 ";
 
-pub struct DebugDataCommand<'a> {
-    rev: &'a str,
-    kind: DebugDataKind,
-}
-
-impl<'a> DebugDataCommand<'a> {
-    pub fn new(rev: &'a str, kind: DebugDataKind) -> Self {
-        DebugDataCommand { rev, kind }
-    }
-}
-
-impl<'a> Command for DebugDataCommand<'a> {
-    #[timed]
-    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
-        let repo = Repo::find()?;
-        let data = debug_data(&repo, self.rev, self.kind)
-            .map_err(|e| to_command_error(self.rev, e))?;
-
-        let mut stdout = ui.stdout_buffer();
-        stdout.write_all(&data)?;
-        stdout.flush()?;
-
-        Ok(())
-    }
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("debugdata")
+        .arg(
+            Arg::with_name("changelog")
+                .help("open changelog")
+                .short("-c")
+                .long("--changelog"),
+        )
+        .arg(
+            Arg::with_name("manifest")
+                .help("open manifest")
+                .short("-m")
+                .long("--manifest"),
+        )
+        .group(
+            ArgGroup::with_name("")
+                .args(&["changelog", "manifest"])
+                .required(true),
+        )
+        .arg(
+            Arg::with_name("rev")
+                .help("revision")
+                .required(true)
+                .value_name("REV"),
+        )
+        .about(HELP_TEXT)
 }
 
-/// Convert operation errors to command errors
-fn to_command_error(rev: &str, err: DebugDataError) -> CommandError {
-    match err.kind {
-        DebugDataErrorKind::IoError(err) => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!("abort: {}\n", err)).into(),
-            )),
-        },
-        DebugDataErrorKind::InvalidRevision => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: invalid revision identifier{}\n",
-                    rev
-                ))
-                .into(),
-            )),
-        },
-        DebugDataErrorKind::AmbiguousPrefix => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: ambiguous revision identifier{}\n",
-                    rev
-                ))
-                .into(),
-            )),
-        },
-        DebugDataErrorKind::UnsuportedRevlogVersion(version) => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: unsupported revlog version {}\n",
-                    version
-                ))
-                .into(),
-            )),
-        },
-        DebugDataErrorKind::CorruptedRevlog => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                "abort: corrupted revlog\n".into(),
-            )),
-        },
-        DebugDataErrorKind::UnknowRevlogDataFormat(format) => CommandError {
-            kind: CommandErrorKind::Abort(Some(
-                utf8_to_local(&format!(
-                    "abort: unknow revlog dataformat {:?}\n",
-                    format
-                ))
-                .into(),
-            )),
-        },
-    }
+#[timed]
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let args = invocation.subcommand_args;
+    let rev = args
+        .value_of("rev")
+        .expect("rev should be a required argument");
+    let kind =
+        match (args.is_present("changelog"), args.is_present("manifest")) {
+            (true, false) => DebugDataKind::Changelog,
+            (false, true) => DebugDataKind::Manifest,
+            (true, true) => {
+                unreachable!("Should not happen since options are exclusive")
+            }
+            (false, false) => {
+                unreachable!("Should not happen since options are required")
+            }
+        };
+
+    let repo = invocation.repo?;
+    let data = debug_data(repo, rev, kind).map_err(|e| (e, rev))?;
+
+    let mut stdout = invocation.ui.stdout_buffer();
+    stdout.write_all(&data)?;
+    stdout.flush()?;
+
+    Ok(())
 }
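+
+// A minimal sketch showing why the `unreachable!` arms above are safe: the
+// required, mutually exclusive `ArgGroup` makes clap reject both "neither
+// flag" and "both flags" before `run` is ever reached.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn changelog_and_manifest_are_exclusive_and_required() {
+        // Both flags at once: rejected by the group.
+        assert!(super::args()
+            .get_matches_from_safe(vec!["debugdata", "-c", "-m", "0"])
+            .is_err());
+        // Neither flag: rejected because the group is required.
+        assert!(super::args()
+            .get_matches_from_safe(vec!["debugdata", "0"])
+            .is_err());
+        // Exactly one flag: accepted.
+        assert!(super::args()
+            .get_matches_from_safe(vec!["debugdata", "-c", "0"])
+            .is_ok());
+    }
+}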
--- a/rust/rhg/src/commands/debugrequirements.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/commands/debugrequirements.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,30 +1,22 @@
-use crate::commands::Command;
 use crate::error::CommandError;
-use crate::ui::Ui;
-use hg::repo::Repo;
-use hg::requirements;
 
 pub const HELP_TEXT: &str = "
 Print the current repo requirements.
 ";
 
-pub struct DebugRequirementsCommand {}
-
-impl DebugRequirementsCommand {
-    pub fn new() -> Self {
-        DebugRequirementsCommand {}
-    }
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("debugrequirements").about(HELP_TEXT)
 }
 
-impl Command for DebugRequirementsCommand {
-    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
-        let repo = Repo::find()?;
-        let mut output = String::new();
-        for req in requirements::load(&repo)? {
-            output.push_str(&req);
-            output.push('\n');
-        }
-        ui.write_stdout(output.as_bytes())?;
-        Ok(())
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let repo = invocation.repo?;
+    let mut output = String::new();
+    let mut requirements: Vec<_> = repo.requirements().iter().collect();
+    requirements.sort();
+    for req in requirements {
+        output.push_str(req);
+        output.push('\n');
     }
+    invocation.ui.write_stdout(output.as_bytes())?;
+    Ok(())
 }
--- a/rust/rhg/src/commands/files.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/commands/files.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,15 +1,10 @@
-use crate::commands::Command;
-use crate::error::{CommandError, CommandErrorKind};
-use crate::ui::utf8_to_local;
+use crate::error::CommandError;
 use crate::ui::Ui;
-use hg::operations::{
-    list_rev_tracked_files, ListRevTrackedFilesError,
-    ListRevTrackedFilesErrorKind,
-};
-use hg::operations::{
-    Dirstate, ListDirstateTrackedFilesError, ListDirstateTrackedFilesErrorKind,
-};
+use clap::Arg;
+use hg::operations::list_rev_tracked_files;
+use hg::operations::Dirstate;
 use hg::repo::Repo;
+use hg::utils::current_dir;
 use hg::utils::files::{get_bytes_from_path, relativize_path};
 use hg::utils::hg_path::{HgPath, HgPathBuf};
 
@@ -19,124 +14,64 @@
 Returns 0 on success.
 ";
 
-pub struct FilesCommand<'a> {
-    rev: Option<&'a str>,
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("files")
+        .arg(
+            Arg::with_name("rev")
+                .help("search the repository as it is in REV")
+                .short("-r")
+                .long("--revision")
+                .value_name("REV")
+                .takes_value(true),
+        )
+        .about(HELP_TEXT)
 }
 
-impl<'a> FilesCommand<'a> {
-    pub fn new(rev: Option<&'a str>) -> Self {
-        FilesCommand { rev }
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let relative = invocation.config.get(b"ui", b"relative-paths");
+    if relative.is_some() {
+        return Err(CommandError::unsupported(
+            "non-default ui.relative-paths",
+        ));
     }
 
-    fn display_files(
-        &self,
-        ui: &Ui,
-        repo: &Repo,
-        files: impl IntoIterator<Item = &'a HgPath>,
-    ) -> Result<(), CommandError> {
-        let cwd = std::env::current_dir()
-            .or_else(|e| Err(CommandErrorKind::CurrentDirNotFound(e)))?;
-        let rooted_cwd = cwd
-            .strip_prefix(repo.working_directory_path())
-            .expect("cwd was already checked within the repository");
-        let rooted_cwd = HgPathBuf::from(get_bytes_from_path(rooted_cwd));
-
-        let mut stdout = ui.stdout_buffer();
+    let rev = invocation.subcommand_args.value_of("rev");
 
-        for file in files {
-            stdout.write_all(relativize_path(file, &rooted_cwd).as_ref())?;
-            stdout.write_all(b"\n")?;
-        }
-        stdout.flush()?;
-        Ok(())
-    }
-}
-
-impl<'a> Command for FilesCommand<'a> {
-    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
-        let repo = Repo::find()?;
-        repo.check_requirements()?;
-        if let Some(rev) = self.rev {
-            let files = list_rev_tracked_files(&repo, rev)
-                .map_err(|e| map_rev_error(rev, e))?;
-            self.display_files(ui, &repo, files.iter())
-        } else {
-            let distate = Dirstate::new(&repo).map_err(map_dirstate_error)?;
-            let files = distate.tracked_files().map_err(map_dirstate_error)?;
-            self.display_files(ui, &repo, files)
-        }
+    let repo = invocation.repo?;
+    if let Some(rev) = rev {
+        let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?;
+        display_files(invocation.ui, repo, files.iter())
+    } else {
+        let dirstate = Dirstate::new(repo)?;
+        let files = dirstate.tracked_files()?;
+        display_files(invocation.ui, repo, files)
     }
 }
 
-/// Convert `ListRevTrackedFilesErrorKind` to `CommandError`
-fn map_rev_error(rev: &str, err: ListRevTrackedFilesError) -> CommandError {
-    CommandError {
-        kind: match err.kind {
-            ListRevTrackedFilesErrorKind::IoError(err) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!("abort: {}\n", err)).into(),
-                ))
-            }
-            ListRevTrackedFilesErrorKind::InvalidRevision => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: invalid revision identifier {}\n",
-                        rev
-                    ))
-                    .into(),
-                ))
-            }
-            ListRevTrackedFilesErrorKind::AmbiguousPrefix => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: ambiguous revision identifier {}\n",
-                        rev
-                    ))
-                    .into(),
-                ))
-            }
-            ListRevTrackedFilesErrorKind::UnsuportedRevlogVersion(version) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: unsupported revlog version {}\n",
-                        version
-                    ))
-                    .into(),
-                ))
-            }
-            ListRevTrackedFilesErrorKind::CorruptedRevlog => {
-                CommandErrorKind::Abort(Some(
-                    "abort: corrupted revlog\n".into(),
-                ))
-            }
-            ListRevTrackedFilesErrorKind::UnknowRevlogDataFormat(format) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!(
-                        "abort: unknow revlog dataformat {:?}\n",
-                        format
-                    ))
-                    .into(),
-                ))
-            }
-        },
+fn display_files<'a>(
+    ui: &Ui,
+    repo: &Repo,
+    files: impl IntoIterator<Item = &'a HgPath>,
+) -> Result<(), CommandError> {
+    let cwd = HgPathBuf::from(get_bytes_from_path(hg::utils::current_dir()?));
+    let working_directory = repo.working_directory_path();
+    let working_directory = current_dir()?.join(working_directory); // Make it absolute
+    let working_directory =
+        HgPathBuf::from(get_bytes_from_path(working_directory));
+
+    let mut stdout = ui.stdout_buffer();
+
+    let mut any = false;
+    for file in files {
+        any = true;
+        let file = working_directory.join(file);
+        stdout.write_all(relativize_path(&file, &cwd).as_ref())?;
+        stdout.write_all(b"\n")?;
+    }
+    stdout.flush()?;
+    if any {
+        Ok(())
+    } else {
+        Err(CommandError::Unsuccessful)
     }
 }
-
-/// Convert `ListDirstateTrackedFilesError` to `CommandError`
-fn map_dirstate_error(err: ListDirstateTrackedFilesError) -> CommandError {
-    CommandError {
-        kind: match err.kind {
-            ListDirstateTrackedFilesErrorKind::IoError(err) => {
-                CommandErrorKind::Abort(Some(
-                    utf8_to_local(&format!("abort: {}\n", err)).into(),
-                ))
-            }
-            ListDirstateTrackedFilesErrorKind::ParseError(_) => {
-                CommandErrorKind::Abort(Some(
-                    // TODO find a better error message
-                    b"abort: parse error\n".to_vec(),
-                ))
-            }
-        },
-    }
-}
--- a/rust/rhg/src/commands/root.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/commands/root.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,8 +1,6 @@
-use crate::commands::Command;
 use crate::error::CommandError;
-use crate::ui::Ui;
 use format_bytes::format_bytes;
-use hg::repo::Repo;
+use hg::errors::{IoErrorContext, IoResultExt};
 use hg::utils::files::get_bytes_from_path;
 
 pub const HELP_TEXT: &str = "
@@ -11,19 +9,20 @@
 Returns 0 on success.
 ";
 
-pub struct RootCommand {}
-
-impl RootCommand {
-    pub fn new() -> Self {
-        RootCommand {}
-    }
+pub fn args() -> clap::App<'static, 'static> {
+    clap::SubCommand::with_name("root").about(HELP_TEXT)
 }
 
-impl Command for RootCommand {
-    fn run(&self, ui: &Ui) -> Result<(), CommandError> {
-        let repo = Repo::find()?;
-        let bytes = get_bytes_from_path(repo.working_directory_path());
-        ui.write_stdout(&format_bytes!(b"{}\n", bytes.as_slice()))?;
-        Ok(())
-    }
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+    let repo = invocation.repo?;
+    let working_directory = repo.working_directory_path();
+    let working_directory = std::fs::canonicalize(working_directory)
+        .with_context(|| {
+            IoErrorContext::CanonicalizingPath(working_directory.to_owned())
+        })?;
+    let bytes = get_bytes_from_path(&working_directory);
+    invocation
+        .ui
+        .write_stdout(&format_bytes!(b"{}\n", bytes.as_slice()))?;
+    Ok(())
 }
--- a/rust/rhg/src/error.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/error.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,124 +1,146 @@
-use crate::exitcode;
+use crate::ui::utf8_to_local;
 use crate::ui::UiError;
+use crate::NoRepoInCwdError;
 use format_bytes::format_bytes;
-use hg::operations::{FindRootError, FindRootErrorKind};
-use hg::requirements::RequirementsError;
+use hg::config::{ConfigError, ConfigParseError};
+use hg::errors::HgError;
+use hg::repo::RepoError;
+use hg::revlog::revlog::RevlogError;
 use hg::utils::files::get_bytes_from_path;
 use std::convert::From;
-use std::path::PathBuf;
 
 /// The kind of command error
 #[derive(Debug)]
-pub enum CommandErrorKind {
-    /// The root of the repository cannot be found
-    RootNotFound(PathBuf),
-    /// The current directory cannot be found
-    CurrentDirNotFound(std::io::Error),
-    /// `.hg/requires`
-    RequirementsError(RequirementsError),
-    /// The standard output stream cannot be written to
-    StdoutError,
-    /// The standard error stream cannot be written to
-    StderrError,
-    /// The command aborted
-    Abort(Option<Vec<u8>>),
-    /// A mercurial capability as not been implemented.
-    Unimplemented,
+pub enum CommandError {
+    /// Exit with an error message and "standard" failure exit code.
+    Abort { message: Vec<u8> },
+
+    /// Exit with a failure exit code but no message.
+    Unsuccessful,
+
+    /// Encountered something (such as a CLI argument, repository layout, …)
+    /// not supported by this version of `rhg`. Depending on configuration
+    /// `rhg` may attempt to silently fall back to Python-based `hg`, which
+    /// may or may not support this feature.
+    UnsupportedFeature { message: Vec<u8> },
 }
 
-impl CommandErrorKind {
-    pub fn get_exit_code(&self) -> exitcode::ExitCode {
-        match self {
-            CommandErrorKind::RootNotFound(_) => exitcode::ABORT,
-            CommandErrorKind::CurrentDirNotFound(_) => exitcode::ABORT,
-            CommandErrorKind::RequirementsError(
-                RequirementsError::Unsupported { .. },
-            ) => exitcode::UNIMPLEMENTED_COMMAND,
-            CommandErrorKind::RequirementsError(_) => exitcode::ABORT,
-            CommandErrorKind::StdoutError => exitcode::ABORT,
-            CommandErrorKind::StderrError => exitcode::ABORT,
-            CommandErrorKind::Abort(_) => exitcode::ABORT,
-            CommandErrorKind::Unimplemented => exitcode::UNIMPLEMENTED_COMMAND,
+impl CommandError {
+    pub fn abort(message: impl AsRef<str>) -> Self {
+        CommandError::Abort {
+            // TODO: bytes-based (instead of Unicode-based) formatting
+            // of error messages to handle non-UTF-8 filenames etc:
+            // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
+            message: utf8_to_local(message.as_ref()).into(),
         }
     }
 
-    /// Return the message corresponding to the error kind if any
-    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
-        match self {
-            CommandErrorKind::RootNotFound(path) => {
-                let bytes = get_bytes_from_path(path);
-                Some(format_bytes!(
-                    b"abort: no repository found in '{}' (.hg not found)!\n",
-                    bytes.as_slice()
-                ))
+    pub fn unsupported(message: impl AsRef<str>) -> Self {
+        CommandError::UnsupportedFeature {
+            message: utf8_to_local(message.as_ref()).into(),
+        }
+    }
+}
+
+/// For now we don’t differentiate between CLI arguments that are invalid and
+/// ones that are valid for `hg` but not supported yet by `rhg`.
+impl From<clap::Error> for CommandError {
+    fn from(error: clap::Error) -> Self {
+        CommandError::unsupported(error.to_string())
+    }
+}
+
+impl From<HgError> for CommandError {
+    fn from(error: HgError) -> Self {
+        match error {
+            HgError::UnsupportedFeature(message) => {
+                CommandError::unsupported(message)
             }
-            CommandErrorKind::CurrentDirNotFound(e) => Some(format_bytes!(
-                b"abort: error getting current working directory: {}\n",
-                e.to_string().as_bytes(),
-            )),
-            CommandErrorKind::RequirementsError(
-                RequirementsError::Corrupted,
-            ) => Some(
-                "abort: .hg/requires is corrupted\n".as_bytes().to_owned(),
-            ),
-            CommandErrorKind::Abort(message) => message.to_owned(),
-            _ => None,
+            _ => CommandError::abort(error.to_string()),
         }
     }
 }
 
-/// The error type for the Command trait
-#[derive(Debug)]
-pub struct CommandError {
-    pub kind: CommandErrorKind,
-}
-
-impl CommandError {
-    /// Exist the process with the corresponding exit code.
-    pub fn exit(&self) {
-        std::process::exit(self.kind.get_exit_code())
-    }
-
-    /// Return the message corresponding to the command error if any
-    pub fn get_error_message_bytes(&self) -> Option<Vec<u8>> {
-        self.kind.get_error_message_bytes()
+impl From<UiError> for CommandError {
+    fn from(_error: UiError) -> Self {
+        // If we already failed writing to stdout or stderr, writing an
+        // error message to stderr about it is likely to fail too.
+        CommandError::abort("")
     }
 }
 
-impl From<CommandErrorKind> for CommandError {
-    fn from(kind: CommandErrorKind) -> Self {
-        CommandError { kind }
+impl From<RepoError> for CommandError {
+    fn from(error: RepoError) -> Self {
+        match error {
+            RepoError::NotFound { at } => CommandError::Abort {
+                message: format_bytes!(
+                    b"abort: repository {} not found",
+                    get_bytes_from_path(at)
+                ),
+            },
+            RepoError::ConfigParseError(error) => error.into(),
+            RepoError::Other(error) => error.into(),
+        }
     }
 }
 
-impl From<UiError> for CommandError {
-    fn from(error: UiError) -> Self {
-        CommandError {
-            kind: match error {
-                UiError::StdoutError(_) => CommandErrorKind::StdoutError,
-                UiError::StderrError(_) => CommandErrorKind::StderrError,
-            },
+impl<'a> From<&'a NoRepoInCwdError> for CommandError {
+    fn from(error: &'a NoRepoInCwdError) -> Self {
+        let NoRepoInCwdError { cwd } = error;
+        CommandError::Abort {
+            message: format_bytes!(
+                b"abort: no repository found in '{}' (.hg not found)!",
+                get_bytes_from_path(cwd)
+            ),
         }
     }
 }
 
-impl From<FindRootError> for CommandError {
-    fn from(err: FindRootError) -> Self {
-        match err.kind {
-            FindRootErrorKind::RootNotFound(path) => CommandError {
-                kind: CommandErrorKind::RootNotFound(path),
-            },
-            FindRootErrorKind::GetCurrentDirError(e) => CommandError {
-                kind: CommandErrorKind::CurrentDirNotFound(e),
-            },
+impl From<ConfigError> for CommandError {
+    fn from(error: ConfigError) -> Self {
+        match error {
+            ConfigError::Parse(error) => error.into(),
+            ConfigError::Other(error) => error.into(),
         }
     }
 }
 
-impl From<RequirementsError> for CommandError {
-    fn from(err: RequirementsError) -> Self {
-        CommandError {
-            kind: CommandErrorKind::RequirementsError(err),
+impl From<ConfigParseError> for CommandError {
+    fn from(error: ConfigParseError) -> Self {
+        let ConfigParseError {
+            origin,
+            line,
+            message,
+        } = error;
+        let line_message = if let Some(line_number) = line {
+            format_bytes!(b":{}", line_number.to_string().into_bytes())
+        } else {
+            Vec::new()
+        };
+        CommandError::Abort {
+            message: format_bytes!(
+                b"config error at {}{}: {}",
+                origin,
+                line_message,
+                message
+            ),
         }
     }
 }
+
+impl From<(RevlogError, &str)> for CommandError {
+    fn from((err, rev): (RevlogError, &str)) -> CommandError {
+        match err {
+            RevlogError::InvalidRevision => CommandError::abort(format!(
+                "abort: invalid revision identifier: {}",
+                rev
+            )),
+            RevlogError::AmbiguousPrefix => CommandError::abort(format!(
+                "abort: ambiguous revision identifier: {}",
+                rev
+            )),
+            RevlogError::Other(error) => error.into(),
+        }
+    }
+}
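+
+// A self-contained sketch (hypothetical types, not part of this change) of
+// why `CliInvocation` in `main.rs` stores `Result<&Repo, &NoRepoInCwdError>`:
+// a `Result` of references is `Copy`, so `invocation.repo?` can be used any
+// number of times through a shared reference, with a `From` impl like the
+// ones above converting the borrowed error.
+#[cfg(test)]
+mod copy_result_sketch {
+    #[derive(Debug, PartialEq)]
+    struct NoRepo;
+
+    fn use_twice(repo: Result<&u32, &NoRepo>) -> Result<u32, NoRepo> {
+        // `repo` is `Copy`, so `?`-style consumption does not move it away.
+        let a = *repo.map_err(|_| NoRepo)?;
+        let b = *repo.map_err(|_| NoRepo)?;
+        Ok(a + b)
+    }
+
+    #[test]
+    fn result_of_references_is_copy() {
+        let value = 21;
+        assert_eq!(use_twice(Ok(&value)), Ok(42));
+    }
+}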
--- a/rust/rhg/src/exitcode.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/exitcode.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -6,5 +6,8 @@
 /// Generic abort
 pub const ABORT: ExitCode = 255;
 
-/// Command not implemented by rhg
-pub const UNIMPLEMENTED_COMMAND: ExitCode = 252;
+/// Generic something completed but did not succeed
+pub const UNSUCCESSFUL: ExitCode = 1;
+
+/// Command or feature not implemented by rhg
+pub const UNIMPLEMENTED: ExitCode = 252;
--- a/rust/rhg/src/main.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/main.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -1,185 +1,455 @@
 extern crate log;
+use crate::ui::Ui;
 use clap::App;
 use clap::AppSettings;
 use clap::Arg;
-use clap::ArgGroup;
 use clap::ArgMatches;
-use clap::SubCommand;
-use hg::operations::DebugDataKind;
-use std::convert::TryFrom;
+use format_bytes::{format_bytes, join};
+use hg::config::Config;
+use hg::repo::{Repo, RepoError};
+use hg::utils::files::{get_bytes_from_os_str, get_path_from_bytes};
+use hg::utils::SliceExt;
+use std::ffi::OsString;
+use std::path::PathBuf;
+use std::process::Command;
 
-mod commands;
+mod blackbox;
 mod error;
 mod exitcode;
 mod ui;
-use commands::Command;
 use error::CommandError;
 
-fn main() {
-    env_logger::init();
+fn main_with_result(
+    process_start_time: &blackbox::ProcessStartTime,
+    ui: &ui::Ui,
+    repo: Result<&Repo, &NoRepoInCwdError>,
+    config: &Config,
+) -> Result<(), CommandError> {
+    check_extensions(config)?;
+
     let app = App::new("rhg")
-        .setting(AppSettings::AllowInvalidUtf8)
+        .global_setting(AppSettings::AllowInvalidUtf8)
+        .global_setting(AppSettings::DisableVersion)
         .setting(AppSettings::SubcommandRequired)
         .setting(AppSettings::VersionlessSubcommands)
-        .version("0.0.1")
-        .subcommand(
-            SubCommand::with_name("root").about(commands::root::HELP_TEXT),
-        )
-        .subcommand(
-            SubCommand::with_name("files")
-                .arg(
-                    Arg::with_name("rev")
-                        .help("search the repository as it is in REV")
-                        .short("-r")
-                        .long("--revision")
-                        .value_name("REV")
-                        .takes_value(true),
-                )
-                .about(commands::files::HELP_TEXT),
+        .arg(
+            Arg::with_name("repository")
+                .help("repository root directory")
+                .short("-R")
+                .long("--repository")
+                .value_name("REPO")
+                .takes_value(true)
+                // Both ok: `hg -R ./foo log` or `hg log -R ./foo`
+                .global(true),
         )
-        .subcommand(
-            SubCommand::with_name("cat")
-                .arg(
-                    Arg::with_name("rev")
-                        .help("search the repository as it is in REV")
-                        .short("-r")
-                        .long("--revision")
-                        .value_name("REV")
-                        .takes_value(true),
-                )
-                .arg(
-                    clap::Arg::with_name("files")
-                        .required(true)
-                        .multiple(true)
-                        .empty_values(false)
-                        .value_name("FILE")
-                        .help("Activity to start: activity@category"),
-                )
-                .about(commands::cat::HELP_TEXT),
+        .arg(
+            Arg::with_name("config")
+                .help("set/override config option (use 'section.name=value')")
+                .long("--config")
+                .value_name("CONFIG")
+                .takes_value(true)
+                .global(true)
+                // Ok: `--config section.key1=val --config section.key2=val2`
+                .multiple(true)
+                // Not ok: `--config section.key1=val section.key2=val2`
+                .number_of_values(1),
+        )
+        .arg(
+            Arg::with_name("cwd")
+                .help("change working directory")
+                .long("--cwd")
+                .value_name("DIR")
+                .takes_value(true)
+                .global(true),
         )
-        .subcommand(
-            SubCommand::with_name("debugdata")
-                .about(commands::debugdata::HELP_TEXT)
-                .arg(
-                    Arg::with_name("changelog")
-                        .help("open changelog")
-                        .short("-c")
-                        .long("--changelog"),
-                )
-                .arg(
-                    Arg::with_name("manifest")
-                        .help("open manifest")
-                        .short("-m")
-                        .long("--manifest"),
+        .version("0.0.1");
+    let app = add_subcommand_args(app);
+
+    let matches = app.clone().get_matches_safe()?;
+
+    let (subcommand_name, subcommand_matches) = matches.subcommand();
+    let run = subcommand_run_fn(subcommand_name)
+        .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
+    let subcommand_args = subcommand_matches
+        .expect("no subcommand arguments from clap despite AppSettings::SubcommandRequired");
+
+    let invocation = CliInvocation {
+        ui,
+        subcommand_args,
+        config,
+        repo,
+    };
+    let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
+    blackbox.log_command_start();
+    let result = run(&invocation);
+    blackbox.log_command_end(exit_code(&result));
+    result
+}
+
+fn main() {
+    // Run this first, before we find out if the blackbox extension is even
+    // enabled, in order to include everything in-between in the duration
+    // measurements. Reading config files can be slow if they’re on NFS.
+    let process_start_time = blackbox::ProcessStartTime::now();
+
+    env_logger::init();
+    let ui = ui::Ui::new();
+
+    let early_args = EarlyArgs::parse(std::env::args_os());
+
+    let initial_current_dir = early_args.cwd.map(|cwd| {
+        let cwd = get_path_from_bytes(&cwd);
+        std::env::current_dir()
+            .and_then(|initial| {
+                std::env::set_current_dir(cwd)?;
+                Ok(initial)
+            })
+            .unwrap_or_else(|error| {
+                exit(
+                    &None,
+                    &ui,
+                    OnUnsupported::Abort,
+                    Err(CommandError::abort(format!(
+                        "abort: {}: '{}'",
+                        error,
+                        cwd.display()
+                    ))),
                 )
-                .group(
-                    ArgGroup::with_name("")
-                        .args(&["changelog", "manifest"])
-                        .required(true),
-                )
-                .arg(
-                    Arg::with_name("rev")
-                        .help("revision")
-                        .required(true)
-                        .value_name("REV"),
-                ),
-        )
-        .subcommand(
-            SubCommand::with_name("debugrequirements")
-                .about(commands::debugrequirements::HELP_TEXT),
-        );
-
-    let matches = app.clone().get_matches_safe().unwrap_or_else(|err| {
-        let _ = ui::Ui::new().writeln_stderr_str(&err.message);
-        std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
+            })
     });
 
-    let ui = ui::Ui::new();
+    let non_repo_config =
+        Config::load(early_args.config).unwrap_or_else(|error| {
+            // Normally this is decided based on config, but we don’t have that
+            // available. As of this writing config loading never returns an
+            // "unsupported" error but that is not enforced by the type system.
+            let on_unsupported = OnUnsupported::Abort;
 
-    let command_result = match_subcommand(matches, &ui);
+            exit(&initial_current_dir, &ui, on_unsupported, Err(error.into()))
+        });
 
-    match command_result {
-        Ok(_) => std::process::exit(exitcode::OK),
-        Err(e) => {
-            let message = e.get_error_message_bytes();
-            if let Some(msg) = message {
-                match ui.write_stderr(&msg) {
-                    Ok(_) => (),
-                    Err(_) => std::process::exit(exitcode::ABORT),
-                };
-            };
-            e.exit()
+    if let Some(repo_path_bytes) = &early_args.repo {
+        lazy_static::lazy_static! {
+            static ref SCHEME_RE: regex::bytes::Regex =
+                // Same as `_matchscheme` in `mercurial/util.py`
+                regex::bytes::Regex::new("^[a-zA-Z0-9+.\\-]+:").unwrap();
+        }
+        if SCHEME_RE.is_match(&repo_path_bytes) {
+            exit(
+                &initial_current_dir,
+                &ui,
+                OnUnsupported::from_config(&ui, &non_repo_config),
+                Err(CommandError::UnsupportedFeature {
+                    message: format_bytes!(
+                        b"URL-like --repository {}",
+                        repo_path_bytes
+                    ),
+                }),
+            )
+        }
+    }
+    let repo_path = early_args.repo.as_deref().map(get_path_from_bytes);
+    let repo_result = match Repo::find(&non_repo_config, repo_path) {
+        Ok(repo) => Ok(repo),
+        Err(RepoError::NotFound { at }) if repo_path.is_none() => {
+            // Not finding a repo is not fatal yet, if `-R` was not given
+            Err(NoRepoInCwdError { cwd: at })
+        }
+        Err(error) => exit(
+            &initial_current_dir,
+            &ui,
+            OnUnsupported::from_config(&ui, &non_repo_config),
+            Err(error.into()),
+        ),
+    };
+
+    let config = if let Ok(repo) = &repo_result {
+        repo.config()
+    } else {
+        &non_repo_config
+    };
+    let on_unsupported = OnUnsupported::from_config(&ui, config);
+
+    let result = main_with_result(
+        &process_start_time,
+        &ui,
+        repo_result.as_ref(),
+        config,
+    );
+    exit(&initial_current_dir, &ui, on_unsupported, result)
+}
+
+fn exit_code(result: &Result<(), CommandError>) -> i32 {
+    match result {
+        Ok(()) => exitcode::OK,
+        Err(CommandError::Abort { .. }) => exitcode::ABORT,
+        Err(CommandError::Unsuccessful) => exitcode::UNSUCCESSFUL,
+
+        // Exit with a specific code and no error message to let a potential
+        // wrapper script fall back to Python-based Mercurial.
+        Err(CommandError::UnsupportedFeature { .. }) => {
+            exitcode::UNIMPLEMENTED
         }
     }
 }
 
-fn match_subcommand(
-    matches: ArgMatches,
-    ui: &ui::Ui,
-) -> Result<(), CommandError> {
-    match matches.subcommand() {
-        ("root", _) => commands::root::RootCommand::new().run(&ui),
-        ("files", Some(matches)) => {
-            commands::files::FilesCommand::try_from(matches)?.run(&ui)
+fn exit(
+    initial_current_dir: &Option<PathBuf>,
+    ui: &Ui,
+    mut on_unsupported: OnUnsupported,
+    result: Result<(), CommandError>,
+) -> ! {
+    if let (
+        OnUnsupported::Fallback { executable },
+        Err(CommandError::UnsupportedFeature { .. }),
+    ) = (&on_unsupported, &result)
+    {
+        let mut args = std::env::args_os();
+        let executable_path = get_path_from_bytes(&executable);
+        let this_executable = args.next().expect("expected argv[0] to exist");
+        if executable_path == &PathBuf::from(this_executable) {
+            // Avoid spawning infinitely many processes until resource
+            // exhaustion.
+            let _ = ui.write_stderr(&format_bytes!(
+                b"Blocking recursive fallback. The 'rhg.fallback-executable = {}' config \
+                points to `rhg` itself.\n",
+                executable
+            ));
+            on_unsupported = OnUnsupported::Abort
+        } else {
+            // `args` is now `argv[1..]` since we’ve already consumed `argv[0]`
+            let mut command = Command::new(executable_path);
+            command.args(args);
+            if let Some(initial) = initial_current_dir {
+                command.current_dir(initial);
+            }
+            let result = command.status();
+            match result {
+                Ok(status) => std::process::exit(
+                    status.code().unwrap_or(exitcode::ABORT),
+                ),
+                Err(error) => {
+                    let _ = ui.write_stderr(&format_bytes!(
+                        b"tried to fall back to a '{}' sub-process but got error {}\n",
+                        executable, format_bytes::Utf8(error)
+                    ));
+                    on_unsupported = OnUnsupported::Abort
+                }
+            }
         }
-        ("cat", Some(matches)) => {
-            commands::cat::CatCommand::try_from(matches)?.run(&ui)
-        }
-        ("debugdata", Some(matches)) => {
-            commands::debugdata::DebugDataCommand::try_from(matches)?.run(&ui)
+    }
+    exit_no_fallback(ui, on_unsupported, result)
+}
+
+fn exit_no_fallback(
+    ui: &Ui,
+    on_unsupported: OnUnsupported,
+    result: Result<(), CommandError>,
+) -> ! {
+    match &result {
+        Ok(_) => {}
+        Err(CommandError::Unsuccessful) => {}
+        Err(CommandError::Abort { message }) => {
+            if !message.is_empty() {
+                // Ignore errors when writing to stderr, we’re already exiting
+                // with failure code so there’s not much more we can do.
+                let _ = ui.write_stderr(&format_bytes!(b"{}\n", message));
+            }
         }
-        ("debugrequirements", _) => {
-            commands::debugrequirements::DebugRequirementsCommand::new()
-                .run(&ui)
+        Err(CommandError::UnsupportedFeature { message }) => {
+            match on_unsupported {
+                OnUnsupported::Abort => {
+                    let _ = ui.write_stderr(&format_bytes!(
+                        b"unsupported feature: {}\n",
+                        message
+                    ));
+                }
+                OnUnsupported::AbortSilent => {}
+                OnUnsupported::Fallback { .. } => unreachable!(),
+            }
         }
-        _ => unreachable!(), // Because of AppSettings::SubcommandRequired,
     }
+    std::process::exit(exit_code(&result))
 }
 
-impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::files::FilesCommand<'a> {
-    type Error = CommandError;
+macro_rules! subcommands {
+    ($( $command: ident )+) => {
+        mod commands {
+            $(
+                pub mod $command;
+            )+
+        }
+
+        fn add_subcommand_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
+            app
+            $(
+                .subcommand(commands::$command::args())
+            )+
+        }
+
+        pub type RunFn = fn(&CliInvocation) -> Result<(), CommandError>;
+
+        fn subcommand_run_fn(name: &str) -> Option<RunFn> {
+            match name {
+                $(
+                    stringify!($command) => Some(commands::$command::run),
+                )+
+                _ => None,
+            }
+        }
+    };
+}
+
+subcommands! {
+    cat
+    debugdata
+    debugrequirements
+    files
+    root
+    config
+}
+pub struct CliInvocation<'a> {
+    ui: &'a Ui,
+    subcommand_args: &'a ArgMatches<'a>,
+    config: &'a Config,
+    /// A reference inside `Result` is a bit peculiar, but it allows
+    /// `invocation.repo?` to work through a shared `&CliInvocation` since
+    /// this `Result` type is `Copy`.
+    repo: Result<&'a Repo, &'a NoRepoInCwdError>,
+}
+
+struct NoRepoInCwdError {
+    cwd: PathBuf,
+}
 
-    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
-        let rev = args.value_of("rev");
-        Ok(commands::files::FilesCommand::new(rev))
+/// CLI arguments to be parsed "early" in order to be able to read
+/// configuration before using Clap. Ideally we would also use Clap for this,
+/// see <https://github.com/clap-rs/clap/discussions/2366>.
+///
+/// These arguments are still declared when we do use Clap later, so that Clap
+/// does not return an error for their presence.
+struct EarlyArgs {
+    /// Values of all `--config` arguments. (Possibly none)
+    config: Vec<Vec<u8>>,
+    /// Value of the `-R` or `--repository` argument, if any.
+    repo: Option<Vec<u8>>,
+    /// Value of the `--cwd` argument, if any.
+    cwd: Option<Vec<u8>>,
+}
+
+impl EarlyArgs {
+    fn parse(args: impl IntoIterator<Item = OsString>) -> Self {
+        let mut args = args.into_iter().map(get_bytes_from_os_str);
+        let mut config = Vec::new();
+        let mut repo = None;
+        let mut cwd = None;
+        // Use `while let` instead of `for` so that we can also call
+        // `args.next()` inside the loop.
+        while let Some(arg) = args.next() {
+            if arg == b"--config" {
+                if let Some(value) = args.next() {
+                    config.push(value)
+                }
+            } else if let Some(value) = arg.drop_prefix(b"--config=") {
+                config.push(value.to_owned())
+            }
+
+            if arg == b"--cwd" {
+                if let Some(value) = args.next() {
+                    cwd = Some(value)
+                }
+            } else if let Some(value) = arg.drop_prefix(b"--cwd=") {
+                cwd = Some(value.to_owned())
+            }
+
+            if arg == b"--repository" || arg == b"-R" {
+                if let Some(value) = args.next() {
+                    repo = Some(value)
+                }
+            } else if let Some(value) = arg.drop_prefix(b"--repository=") {
+                repo = Some(value.to_owned())
+            } else if let Some(value) = arg.drop_prefix(b"-R") {
+                repo = Some(value.to_owned())
+            }
+        }
+        Self { config, repo, cwd }
     }
 }
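
The early parser above accepts every spelling Mercurial allows for these global flags: `--config VALUE` and `--config=VALUE`, `--cwd VALUE` and `--cwd=VALUE`, plus `--repository VALUE`, `--repository=VALUE`, `-R VALUE`, and the attached `-RVALUE`. A compact Python sketch of the same scan (hypothetical, for illustration only):

def parse_early(argv):
    # argv: process arguments after the program name
    config, repo, cwd = [], None, None
    it = iter(argv)
    for arg in it:
        if arg == '--config':
            value = next(it, None)
            if value is not None:
                config.append(value)
        elif arg.startswith('--config='):
            config.append(arg[len('--config='):])
        elif arg == '--cwd':
            cwd = next(it, None)
        elif arg.startswith('--cwd='):
            cwd = arg[len('--cwd='):]
        elif arg in ('--repository', '-R'):
            repo = next(it, None)
        elif arg.startswith('--repository='):
            repo = arg[len('--repository='):]
        elif arg.startswith('-R'):
            repo = arg[2:]
    return config, repo, cwd
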
 
-impl<'a> TryFrom<&'a ArgMatches<'_>> for commands::cat::CatCommand<'a> {
-    type Error = CommandError;
+/// What to do when encountering some unsupported feature.
+///
+/// See `HgError::UnsupportedFeature` and `CommandError::UnsupportedFeature`.
+enum OnUnsupported {
+    /// Print an error message describing what feature is not supported,
+    /// and exit with code 252.
+    Abort,
+    /// Silently exit with code 252.
+    AbortSilent,
+    /// Try running a Python implementation
+    Fallback { executable: Vec<u8> },
+}
+
+impl OnUnsupported {
+    const DEFAULT: Self = OnUnsupported::Abort;
 
-    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
-        let rev = args.value_of("rev");
-        let files = match args.values_of("files") {
-            Some(files) => files.collect(),
-            None => vec![],
-        };
-        Ok(commands::cat::CatCommand::new(rev, files))
+    fn from_config(ui: &Ui, config: &Config) -> Self {
+        match config
+            .get(b"rhg", b"on-unsupported")
+            .map(|value| value.to_ascii_lowercase())
+            .as_deref()
+        {
+            Some(b"abort") => OnUnsupported::Abort,
+            Some(b"abort-silent") => OnUnsupported::AbortSilent,
+            Some(b"fallback") => OnUnsupported::Fallback {
+                executable: config
+                    .get(b"rhg", b"fallback-executable")
+                    .unwrap_or_else(|| {
+                        exit_no_fallback(
+                            ui,
+                            Self::Abort,
+                            Err(CommandError::abort(
+                                "abort: 'rhg.on-unsupported=fallback' without \
+                                'rhg.fallback-executable' set."
+                            )),
+                        )
+                    })
+                    .to_owned(),
+            },
+            None => Self::DEFAULT,
+            Some(_) => {
+                // TODO: warn about unknown config value
+                Self::DEFAULT
+            }
+        }
     }
 }
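
In prose: `rhg.on-unsupported` chooses between aborting loudly (the default), aborting silently, or re-executing the Python `hg` named by `rhg.fallback-executable`; requesting fallback without naming an executable is itself an abort. A Python sketch of the same resolution (hypothetical, with config as a dict keyed by `(section, name)` pairs):

def on_unsupported(config):
    value = config.get(('rhg', 'on-unsupported'), 'abort').lower()
    if value == 'abort-silent':
        return ('abort-silent', None)
    if value == 'fallback':
        executable = config.get(('rhg', 'fallback-executable'))
        if executable is None:
            raise SystemExit(
                "abort: 'rhg.on-unsupported=fallback' without "
                "'rhg.fallback-executable' set."
            )
        return ('fallback', executable)
    # 'abort' and unrecognized values both use the default behavior
    return ('abort', None)
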
 
-impl<'a> TryFrom<&'a ArgMatches<'_>>
-    for commands::debugdata::DebugDataCommand<'a>
-{
-    type Error = CommandError;
+const SUPPORTED_EXTENSIONS: &[&[u8]] = &[b"blackbox", b"share"];
+
+fn check_extensions(config: &Config) -> Result<(), CommandError> {
+    let enabled = config.get_section_keys(b"extensions");
+
+    let mut unsupported = enabled;
+    for supported in SUPPORTED_EXTENSIONS {
+        unsupported.remove(supported);
+    }
 
-    fn try_from(args: &'a ArgMatches) -> Result<Self, Self::Error> {
-        let rev = args
-            .value_of("rev")
-            .expect("rev should be a required argument");
-        let kind = match (
-            args.is_present("changelog"),
-            args.is_present("manifest"),
-        ) {
-            (true, false) => DebugDataKind::Changelog,
-            (false, true) => DebugDataKind::Manifest,
-            (true, true) => {
-                unreachable!("Should not happen since options are exclusive")
-            }
-            (false, false) => {
-                unreachable!("Should not happen since options are required")
-            }
-        };
-        Ok(commands::debugdata::DebugDataCommand::new(rev, kind))
+    if let Some(ignored_list) =
+        config.get_simple_list(b"rhg", b"ignored-extensions")
+    {
+        for ignored in ignored_list {
+            unsupported.remove(ignored);
+        }
+    }
+
+    if unsupported.is_empty() {
+        Ok(())
+    } else {
+        Err(CommandError::UnsupportedFeature {
+            message: format_bytes!(
+                b"extensions: {} (consider adding them to 'rhg.ignored-extensions' config)",
+                join(unsupported, b", ")
+            ),
+        })
     }
 }
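
The gate is pure set arithmetic: everything enabled in `[extensions]`, minus what rhg supports, minus what `rhg.ignored-extensions` tells it to ignore, must be empty; otherwise the `UnsupportedFeature` error above fires and the `OnUnsupported` policy decides whether to abort or fall back. A Python sketch under those assumptions:

SUPPORTED_EXTENSIONS = {b'blackbox', b'share'}

def check_extensions(enabled, ignored=()):
    # enabled: keys of the [extensions] section;
    # ignored: the rhg.ignored-extensions list
    unsupported = set(enabled) - SUPPORTED_EXTENSIONS - set(ignored)
    if unsupported:
        names = b', '.join(sorted(unsupported)).decode('ascii', 'replace')
        raise NotImplementedError('unsupported extensions: ' + names)
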
--- a/rust/rhg/src/ui.rs	Sat Mar 13 02:09:23 2021 -0500
+++ b/rust/rhg/src/ui.rs	Thu Mar 18 18:24:59 2021 -0400
@@ -49,11 +49,6 @@
 
         stderr.flush().or_else(handle_stderr_error)
     }
-
-    /// Write string line to stderr
-    pub fn writeln_stderr_str(&self, s: &str) -> Result<(), UiError> {
-        self.write_stderr(&format!("{}\n", s).as_bytes())
-    }
 }
 
 /// A buffered stdout writer for faster batch printing operations.
--- a/setup.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/setup.py	Thu Mar 18 18:24:59 2021 -0400
@@ -419,9 +419,9 @@
         ltag = sysstr(hg.run(ltagcmd))
         changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
         changessince = len(hg.run(changessincecmd).splitlines())
-        version = '%s+%s-%s' % (ltag, changessince, hgid)
+        version = '%s+hg%s.%s' % (ltag, changessince, hgid)
     if version.endswith('+'):
-        version += time.strftime('%Y%m%d')
+        version = version[:-1] + 'local' + time.strftime('%Y%m%d')
 elif os.path.exists('.hg_archival.txt'):
     kw = dict(
         [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
@@ -430,11 +430,13 @@
         version = kw['tag']
     elif 'latesttag' in kw:
         if 'changessincelatesttag' in kw:
-            version = '%(latesttag)s+%(changessincelatesttag)s-%(node).12s' % kw
+            version = (
+                '%(latesttag)s+hg%(changessincelatesttag)s.%(node).12s' % kw
+            )
         else:
-            version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
+            version = '%(latesttag)s+hg%(latesttagdistance)s.%(node).12s' % kw
     else:
-        version = kw.get('node', '')[:12]
+        version = '0+hg' + kw.get('node', '')[:12]
 
 if version:
     versionb = version
@@ -451,20 +453,6 @@
         ),
     )
 
-try:
-    oldpolicy = os.environ.get('HGMODULEPOLICY', None)
-    os.environ['HGMODULEPOLICY'] = 'py'
-    from mercurial import __version__
-
-    version = __version__.version
-except ImportError:
-    version = b'unknown'
-finally:
-    if oldpolicy is None:
-        del os.environ['HGMODULEPOLICY']
-    else:
-        os.environ['HGMODULEPOLICY'] = oldpolicy
-
 
 class hgbuild(build):
     # Insert hgbuildmo first so that files in mercurial/locale/ are found
@@ -609,6 +597,12 @@
         # and its build is not explicitly disabled (for external build
         # as Linux distributions would do)
         if self.distribution.rust and self.rust:
+            if not sys.platform.startswith('linux'):
+                self.warn(
+                    "rust extensions have only been tested on Linux "
+                    "and may not behave correctly on other platforms"
+                )
+
             for rustext in ruststandalones:
                 rustext.build('' if self.inplace else self.build_lib)
 
@@ -1677,8 +1671,8 @@
 # unicode on Python 2 still works because it won't contain any
 # non-ascii bytes and will be implicitly converted back to bytes
 # when operated on.
-assert isinstance(version, bytes)
-setupversion = version.decode('ascii')
+assert isinstance(version, str)
+setupversion = version
 
 extra = {}
 
--- a/tests/common-pattern.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/common-pattern.py	Thu Mar 18 18:24:59 2021 -0400
@@ -20,7 +20,6 @@
         br'phases%253Dheads%250A'
         br'pushkey%250A'
         br'remote-changegroup%253Dhttp%252Chttps%250A'
-        br'rev-branch-cache%250A'
         br'stream%253Dv2',
         # (the replacement patterns)
         br'$USUAL_BUNDLE_CAPS$',
@@ -53,7 +52,6 @@
         br'phases%3Dheads%0A'
         br'pushkey%0A'
         br'remote-changegroup%3Dhttp%2Chttps%0A'
-        br'rev-branch-cache%0A'
         br'stream%3Dv2',
         # (replacement patterns)
         br'$USUAL_BUNDLE2_CAPS$',
@@ -70,8 +68,7 @@
         br'listkeys%0A'
         br'phases%3Dheads%0A'
         br'pushkey%0A'
-        br'remote-changegroup%3Dhttp%2Chttps%0A'
-        br'rev-branch-cache',
+        br'remote-changegroup%3Dhttp%2Chttps',
         # (replacement patterns)
         br'$USUAL_BUNDLE2_CAPS_SERVER$',
     ),
@@ -85,7 +82,6 @@
         br'listkeys%0A'
         br'pushkey%0A'
         br'remote-changegroup%3Dhttp%2Chttps%0A'
-        br'rev-branch-cache%0A'
         br'stream%3Dv2',
         # (replacement patterns)
         br'$USUAL_BUNDLE2_CAPS_NO_PHASES$',
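
Dropping the rev-branch-cache capability from these expected capability strings is also why every replycaps payload in the .t files later in this patch shrinks from 224 to 207 bytes: the capability name plus a one-byte separator plausibly accounts for the whole 17-byte difference.

# Quick arithmetic check of the recurring 224 -> 207 change below,
# assuming one separator byte after the capability name:
assert len(b'rev-branch-cache') + 1 == 224 - 207
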
--- a/tests/flagprocessorext.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/flagprocessorext.py	Thu Mar 18 18:24:59 2021 -0400
@@ -31,28 +31,28 @@
     return False
 
 
-def noopdonothing(self, text, sidedata):
+def noopdonothing(self, text):
     return (text, True)
 
 
 def noopdonothingread(self, text):
-    return (text, True, {})
+    return (text, True)
 
 
-def b64encode(self, text, sidedata):
+def b64encode(self, text):
     return (base64.b64encode(text), False)
 
 
 def b64decode(self, text):
-    return (base64.b64decode(text), True, {})
+    return (base64.b64decode(text), True)
 
 
-def gzipcompress(self, text, sidedata):
+def gzipcompress(self, text):
     return (zlib.compress(text), False)
 
 
 def gzipdecompress(self, text):
-    return (zlib.decompress(text), True, {})
+    return (zlib.decompress(text), True)
 
 
 def supportedoutgoingversions(orig, repo):
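
On this branch the flag-processor hooks lose their sidedata leg: processors take `(self, text)` and return a plain `(text, validatehash)` pair instead of a three-tuple. A minimal matched pair under the new contract (hypothetical, not from the patch):

def reverse_write(self, text):
    # False: the transformed bytes should not be hash-checked
    return (text[::-1], False)

def reverse_read(self, text):
    # True: the restored text should be checked against the node hash
    return (text[::-1], True)
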
--- a/tests/hghave.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/hghave.py	Thu Mar 18 18:24:59 2021 -0400
@@ -188,6 +188,11 @@
     return 'CHGHG' in os.environ
 
 
+@check("rhg", "running with rhg as 'hg'")
+def has_rhg():
+    return 'RHG_INSTALLED_AS_HG' in os.environ
+
+
 @check("cvs", "cvs client/server")
 def has_cvs():
     re = br'Concurrent Versions System.*?server'
@@ -591,7 +596,7 @@
     return matchoutput("pylint --help", br"Usage:[ ]+pylint", True)
 
 
-@check("clang-format", "clang-format C code formatter")
+@check("clang-format", "clang-format C code formatter (>= 11)")
 def has_clang_format():
     m = matchoutput('clang-format --version', br'clang-format version (\d+)')
     # style changed somewhere between 10.x and 11.x
@@ -702,6 +707,12 @@
     return os.path.isdir(os.path.join(t, "..", ".hg"))
 
 
+@check("network-io", "whether tests are allowed to access 3rd party services")
+def has_network_io():
+    t = os.environ.get("HGTESTS_ALLOW_NETIO")
+    return t == "1"
+
+
 @check("curses", "terminfo compiler and curses module")
 def has_curses():
     try:
@@ -1034,7 +1045,7 @@
     return matchoutput('sqlite3 -version', br'^3\.\d+')
 
 
-@check('vcr', 'vcr http mocking library')
+@check('vcr', 'vcr http mocking library (pytest-vcr)')
 def has_vcr():
     try:
         import vcr
@@ -1054,7 +1065,7 @@
     return matchoutput('emacs --version', b'GNU Emacs 2(4.4|4.5|5|6|7|8|9)')
 
 
-@check('black', 'the black formatter for python')
+@check('black', 'the black formatter for python (>= 20.8b1)')
 def has_black():
     blackcmd = 'black --version'
     version_regex = b'black, version ([0-9a-b.]+)'
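
hghave checks follow one pattern: decorate a zero-argument predicate with `@check(name, description)` and return a boolean, as the new `rhg` and `network-io` checks above do. A hypothetical check in the same style, using the module's own `matchoutput` helper:

@check("my-tool", "the hypothetical my-tool helper is installed")
def has_my_tool():
    return matchoutput('my-tool --version', br'my-tool \d+\.\d+')
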
--- a/tests/remotefilelog-getflogheads.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/remotefilelog-getflogheads.py	Thu Mar 18 18:24:59 2021 -0400
@@ -21,7 +21,10 @@
     dest = repo.ui.expandpath(b'default')
     peer = hg.peer(repo, {}, dest)
 
-    flogheads = peer.x_rfl_getflogheads(path)
+    try:
+        flogheads = peer.x_rfl_getflogheads(path)
+    finally:
+        peer.close()
 
     if flogheads:
         for head in flogheads:
--- a/tests/run-tests.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/run-tests.py	Thu Mar 18 18:24:59 2021 -0400
@@ -540,6 +540,11 @@
         action="store_true",
         help="show chg debug logs",
     )
+    hgconf.add_argument(
+        "--rhg",
+        action="store_true",
+        help="install and use rhg Rust implementation in place of hg",
+    )
     hgconf.add_argument("--compiler", help="compiler to build with")
     hgconf.add_argument(
         '--extra-config-opt',
@@ -552,6 +557,7 @@
         "--local",
         action="store_true",
         help="shortcut for --with-hg=<testdir>/../hg, "
+        "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
         "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
     )
     hgconf.add_argument(
@@ -580,6 +586,11 @@
         help="use specified chg wrapper in place of hg",
     )
     hgconf.add_argument(
+        "--with-rhg",
+        metavar="RHG",
+        help="use specified rhg Rust implementation in place of hg",
+    )
+    hgconf.add_argument(
         "--with-hg",
         metavar="HG",
         help="test using specified hg script rather than a "
@@ -667,13 +678,17 @@
         parser.error('--rust cannot be used with --no-rust')
 
     if options.local:
-        if options.with_hg or options.with_chg:
-            parser.error('--local cannot be used with --with-hg or --with-chg')
+        if options.with_hg or options.with_rhg or options.with_chg:
+            parser.error(
+                '--local cannot be used with --with-hg or --with-rhg or --with-chg'
+            )
         testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
         reporootdir = os.path.dirname(testdir)
         pathandattrs = [(b'hg', 'with_hg')]
         if options.chg:
             pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
+        if options.rhg:
+            pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
         for relpath, attr in pathandattrs:
             binpath = os.path.join(reporootdir, relpath)
             if os.name != 'nt' and not os.access(binpath, os.X_OK):
@@ -696,6 +711,8 @@
 
     if (options.chg or options.with_chg) and os.name == 'nt':
         parser.error('chg does not work on %s' % os.name)
+    if (options.rhg or options.with_rhg) and os.name == 'nt':
+        parser.error('rhg does not work on %s' % os.name)
     if options.with_chg:
         options.chg = False  # no installation to temporary location
         options.with_chg = canonpath(_sys2bytes(options.with_chg))
@@ -704,12 +721,28 @@
             and os.access(options.with_chg, os.X_OK)
         ):
             parser.error('--with-chg must specify a chg executable')
+    if options.with_rhg:
+        options.rhg = False  # no installation to temporary location
+        options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
+        if not (
+            os.path.isfile(options.with_rhg)
+            and os.access(options.with_rhg, os.X_OK)
+        ):
+            parser.error('--with-rhg must specify a rhg executable')
     if options.chg and options.with_hg:
         # chg shares installation location with hg
         parser.error(
             '--chg does not work when --with-hg is specified '
             '(use --with-chg instead)'
         )
+    if options.rhg and options.with_hg:
+        # rhg shares installation location with hg
+        parser.error(
+            '--rhg does not work when --with-hg is specified '
+            '(use --with-rhg instead)'
+        )
+    if options.rhg and options.chg:
+        parser.error('--rhg and --chg do not work together')
 
     if options.color == 'always' and not pygmentspresent:
         sys.stderr.write(
@@ -2278,7 +2311,7 @@
                         if test.path.endswith(b'.t'):
                             rename(test.errpath, test.path)
                         else:
-                            rename(test.errpath, '%s.out' % test.path)
+                            rename(test.errpath, b'%s.out' % test.path)
                         accepted = True
             if not accepted:
                 self.faildata[test.name] = b''.join(lines)
@@ -3098,6 +3131,25 @@
             chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
             self._hgcommand = os.path.basename(self.options.with_chg)
 
+        # configure fallback and replace "hg" command by "rhg"
+        rhgbindir = self._bindir
+        if self.options.rhg or self.options.with_rhg:
+            # Affects hghave.py
+            osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
+            # Affects configuration. Alternatives would be setting
+            # configuration through `$HGRCPATH`, but some tests override
+            # that, or changing `_hgcommand` to include `--config`, but that
+            # disrupts tests that print command lines and check expected
+            # output.
+            osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
+            osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
+                self._bindir, self._hgcommand
+            )
+        if self.options.rhg:
+            self._hgcommand = b'rhg'
+        elif self.options.with_rhg:
+            rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
+            self._hgcommand = os.path.basename(self.options.with_rhg)
+
         osenvironb[b"BINDIR"] = self._bindir
         osenvironb[b"PYTHON"] = PYTHON
 
@@ -3116,6 +3168,8 @@
             path.insert(2, realdir)
         if chgbindir != self._bindir:
             path.insert(1, chgbindir)
+        if rhgbindir != self._bindir:
+            path.insert(1, rhgbindir)
         if self._testdir != runtestdir:
             path = [self._testdir] + path
         if self._tmpbindir != self._bindir:
@@ -3335,6 +3389,9 @@
                 if self.options.chg:
                     assert self._installdir
                     self._installchg()
+                if self.options.rhg:
+                    assert self._installdir
+                    self._installrhg()
 
                 log(
                     'running %d tests using %d parallel processes'
@@ -3696,6 +3753,33 @@
                 sys.stdout.write(out)
             sys.exit(1)
 
+    def _installrhg(self):
+        """Install rhg into the test environment"""
+        vlog('# Performing temporary installation of rhg')
+        assert os.path.dirname(self._bindir) == self._installdir
+        assert self._hgroot, 'must be called after _installhg()'
+        cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
+            b'make': b'make',  # TODO: switch by option or environment?
+            b'prefix': self._installdir,
+        }
+        cwd = self._hgroot
+        vlog("# Running", cmd)
+        proc = subprocess.Popen(
+            cmd,
+            shell=True,
+            cwd=cwd,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+        out, _err = proc.communicate()
+        if proc.returncode != 0:
+            if PYTHON3:
+                sys.stdout.buffer.write(out)
+            else:
+                sys.stdout.write(out)
+            sys.exit(1)
+
     def _outputcoverage(self):
         """Produce code coverage output."""
         import coverage
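
Taken together, `--rhg` installs rhg next to hg (via `make install-rhg`), swaps the command under test to `rhg`, and exports three environment variables so hghave can detect the setup and rhg can fall back to the real hg for anything it does not implement; `./run-tests.py --local --rhg` then picks up `rust/target/release/rhg` through the `--local` shortcut. A sketch of that environment assembly (illustrative only, assuming the install layout above):

import os

def rhg_test_env(bindir, hgcommand):
    env = dict(os.environ)
    env['RHG_INSTALLED_AS_HG'] = '1'        # makes hghave's "rhg" check pass
    env['RHG_ON_UNSUPPORTED'] = 'fallback'  # fall back instead of aborting
    env['RHG_FALLBACK_EXECUTABLE'] = os.path.join(bindir, hgcommand)
    return env
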
--- a/tests/simplestorerepo.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/simplestorerepo.py	Thu Mar 18 18:24:59 2021 -0400
@@ -106,7 +106,9 @@
 
     _flagserrorclass = simplestoreerror
 
-    def __init__(self, svfs, path):
+    def __init__(self, repo, svfs, path):
+        self.nullid = repo.nullid
+        self._repo = repo
         self._svfs = svfs
         self._path = path
 
@@ -300,7 +302,7 @@
             text = rawtext
         else:
             r = flagutil.processflagsread(self, rawtext, flags)
-            text, validatehash, sidedata = r
+            text, validatehash = r
         if validatehash:
             self.checkhash(text, node, rev=rev)
 
@@ -446,6 +448,7 @@
         revisiondata=False,
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
+        sidedata_helpers=None,
     ):
         # TODO this will probably break on some ordering options.
         nodes = [n for n in nodes if n != nullid]
@@ -459,6 +462,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
+            sidedata_helpers=sidedata_helpers,
         ):
             yield delta
 
@@ -550,7 +554,7 @@
 
             if node in self._indexbynode:
                 if duplicaterevisioncb:
-                    duplicaterevisioncb(self, node)
+                    duplicaterevisioncb(self, self.rev(node))
                 empty = False
                 continue
 
@@ -560,12 +564,12 @@
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)
 
-            self._addrawrevision(
+            rev = self._addrawrevision(
                 node, text, transaction, linkrev, p1, p2, flags
             )
 
             if addrevisioncb:
-                addrevisioncb(self, node)
+                addrevisioncb(self, rev)
             empty = False
         return not empty
 
@@ -687,7 +691,7 @@
 
     class simplestorerepo(repo.__class__):
         def file(self, f):
-            return filestorage(self.svfs, f)
+            return filestorage(repo, self.svfs, f)
 
     repo.__class__ = simplestorerepo
 
--- a/tests/svnxml.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/svnxml.py	Thu Mar 18 18:24:59 2021 -0400
@@ -15,6 +15,7 @@
     e['revision'] = entry.getAttribute('revision')
     e['author'] = xmltext(entry.getElementsByTagName('author')[0])
     e['msg'] = xmltext(entry.getElementsByTagName('msg')[0])
+    e['date'] = xmltext(entry.getElementsByTagName('date')[0])
     e['paths'] = []
     paths = entry.getElementsByTagName('paths')
     if paths:
@@ -42,7 +43,7 @@
     except AttributeError:
         fp = sys.stdout
     for e in entries:
-        for k in ('revision', 'author', 'msg'):
+        for k in ('revision', 'author', 'date', 'msg'):
             fp.write(('%s: %s\n' % (k, e[k])).encode('utf-8'))
         for path, action, fpath, frev in sorted(e['paths']):
             frominfo = b''
--- a/tests/test-acl.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-acl.t	Thu Mar 18 18:24:59 2021 -0400
@@ -109,14 +109,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -175,14 +175,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -204,6 +204,7 @@
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
+  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -244,14 +245,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -283,6 +284,7 @@
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
+  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -323,14 +325,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -359,6 +361,7 @@
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
   no rollback information available
   0:6675d58eff77
@@ -393,14 +396,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -468,14 +471,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -540,14 +543,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -617,14 +620,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -691,14 +694,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -764,7 +767,7 @@
   list of changesets:
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   bundle2-output-bundle: "HG20", 7 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:bookmarks" 37 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
@@ -773,7 +776,7 @@
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:bookmarks" supported
   bundle2-input-part: total payload size 37
   bundle2-input-part: "check:phases" supported
@@ -853,7 +856,7 @@
   list of changesets:
   ef1ea85a6374b77d6da9dcda9541f498f2d17df7
   bundle2-output-bundle: "HG20", 7 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:bookmarks" 37 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
@@ -862,7 +865,7 @@
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:bookmarks" supported
   bundle2-input-part: total payload size 37
   bundle2-input-part: "check:phases" supported
@@ -897,6 +900,7 @@
   bundle2-input-bundle: 7 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on bookmark "moving-bookmark" (changeset "ef1ea85a6374b77d6da9dcda9541f498f2d17df7")
   no rollback information available
   0:6675d58eff77
@@ -943,14 +947,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1029,14 +1033,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1069,6 +1073,7 @@
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
   no rollback information available
   0:6675d58eff77
@@ -1112,14 +1117,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1190,14 +1195,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1279,14 +1284,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1369,14 +1374,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1408,6 +1413,7 @@
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 5 parts total
+  truncating cache/rbc-revs-v1 to 8
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
   bundle2-output-bundle: "HG20", 1 parts total
@@ -1455,14 +1461,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1493,6 +1499,7 @@
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   no rollback information available
   0:6675d58eff77
@@ -1537,14 +1544,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1624,14 +1631,14 @@
   f9cafe1212c8c6fa1120d14a556e18cc44ff8bdd
   911600dab2ae7a9baff75958b84fe606851ce955
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 24
   bundle2-input-part: "check:updated-heads" supported
@@ -1664,6 +1671,7 @@
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
+  truncating cache/rbc-revs-v1 to 8
   abort: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
   no rollback information available
   0:6675d58eff77
@@ -1746,14 +1754,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -1833,14 +1841,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -1911,14 +1919,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -1985,14 +1993,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -2053,14 +2061,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -2145,14 +2153,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -2236,14 +2244,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -2309,14 +2317,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
@@ -2394,14 +2402,14 @@
   911600dab2ae7a9baff75958b84fe606851ce955
   e8fc755d4d8217ee5b0c2bb41558c40d43b92c01
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 48 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 224
+  bundle2-input-part: total payload size 207
   bundle2-input-part: "check:phases" supported
   bundle2-input-part: total payload size 48
   bundle2-input-part: "check:updated-heads" supported
--- a/tests/test-audit-subrepo.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-audit-subrepo.t	Thu Mar 18 18:24:59 2021 -0400
@@ -323,7 +323,7 @@
   new changesets 7a2f0e59146f
   .hgsubstate: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ cat main5/.hg/hgrc | grep pwned
   [1]
 
@@ -623,7 +623,7 @@
   new changesets * (glob)
   .hgsubstate: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ ls "$FAKEHOME"
   a
   $ test -d "$FAKEHOME/.hg"
@@ -652,7 +652,7 @@
   new changesets * (glob)
   .hgsubstate: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ ls -A "$FAKEHOME"
   .hg
   a
--- a/tests/test-batching.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-batching.py	Thu Mar 18 18:24:59 2021 -0400
@@ -204,7 +204,7 @@
 
     @wireprotov1peer.batchable
     def foo(self, one, two=None):
-        encargs = [
+        encoded_args = [
             (
                 b'one',
                 mangle(one),
@@ -214,9 +214,9 @@
                 mangle(two),
             ),
         ]
-        encresref = wireprotov1peer.future()
-        yield encargs, encresref
-        yield unmangle(encresref.value)
+        encoded_res_future = wireprotov1peer.future()
+        yield encoded_args, encoded_res_future
+        yield unmangle(encoded_res_future.value)
 
     @wireprotov1peer.batchable
     def bar(self, b, a):
--- a/tests/test-bookmarks-pushpull.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bookmarks-pushpull.t	Thu Mar 18 18:24:59 2021 -0400
@@ -129,10 +129,10 @@
   bundle2-output: bundle parameter: 
   bundle2-output: start of parts
   bundle2-output: bundle part: "replycaps"
-  bundle2-output-part: "replycaps" 241 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output: part 0: "REPLYCAPS"
   bundle2-output: header chunk size: 16
-  bundle2-output: payload chunk size: 241
+  bundle2-output: payload chunk size: 224
   bundle2-output: closing payload chunk
   bundle2-output: bundle part: "check:bookmarks"
   bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -162,9 +162,9 @@
   bundle2-input: part parameters: 0
   bundle2-input: found a handler for part replycaps
   bundle2-input-part: "replycaps" supported
-  bundle2-input: payload chunk size: 241
+  bundle2-input: payload chunk size: 224
   bundle2-input: payload chunk size: 0
-  bundle2-input-part: total payload size 241
+  bundle2-input-part: total payload size 224
   bundle2-input: part header size: 22
   bundle2-input: part type: "CHECK:BOOKMARKS"
   bundle2-input: part id: "1"
@@ -241,10 +241,10 @@
   bundle2-output: bundle parameter: 
   bundle2-output: start of parts
   bundle2-output: bundle part: "replycaps"
-  bundle2-output-part: "replycaps" 241 bytes payload
+  bundle2-output-part: "replycaps" 224 bytes payload
   bundle2-output: part 0: "REPLYCAPS"
   bundle2-output: header chunk size: 16
-  bundle2-output: payload chunk size: 241
+  bundle2-output: payload chunk size: 224
   bundle2-output: closing payload chunk
   bundle2-output: bundle part: "check:bookmarks"
   bundle2-output-part: "check:bookmarks" 23 bytes payload
@@ -275,9 +275,9 @@
   bundle2-input: part parameters: 0
   bundle2-input: found a handler for part replycaps
   bundle2-input-part: "replycaps" supported
-  bundle2-input: payload chunk size: 241
+  bundle2-input: payload chunk size: 224
   bundle2-input: payload chunk size: 0
-  bundle2-input-part: total payload size 241
+  bundle2-input-part: total payload size 224
   bundle2-input: part header size: 22
   bundle2-input: part type: "CHECK:BOOKMARKS"
   bundle2-input: part id: "1"
@@ -1177,7 +1177,7 @@
   searching for changes
   no changes found
   abort: prepushkey hook exited with status 1
-  [255]
+  [40]
 
 #endif
 
--- a/tests/test-bookmarks.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bookmarks.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1125,7 +1125,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
 
 (check visible bookmarks while transaction running in repo)
@@ -1158,7 +1158,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
 
 Check pretxnclose-bookmark can abort a transaction
 --------------------------------------------------
@@ -1242,7 +1242,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose-bookmark.force-public hook exited with status 1
-  [255]
+  [40]
 
 create on a public changeset
 
@@ -1254,4 +1254,4 @@
   transaction abort!
   rollback completed
   abort: pretxnclose-bookmark.force-forward hook exited with status 1
-  [255]
+  [40]
--- a/tests/test-bundle-r.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bundle-r.t	Thu Mar 18 18:24:59 2021 -0400
@@ -223,7 +223,7 @@
   adding changesets
   transaction abort!
   rollback completed
-  abort: 00changelog.i@93ee6ab32777: unknown parent
+  abort: 00changelog.i@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
   [50]
 
 revision 2
--- a/tests/test-bundle-type.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bundle-type.t	Thu Mar 18 18:24:59 2021 -0400
@@ -201,6 +201,15 @@
   (see 'hg help bundlespec' for supported values for --type)
   [10]
 
+zstd supports threading
+
+  $ hg init test-compthreads
+  $ cd test-compthreads
+  $ hg debugbuilddag +3
+  $ hg --config experimental.bundlecompthreads=1 bundle -a -t zstd-v2 zstd-v2-threaded.hg
+  3 changesets found
+  $ cd ..
+
 #else
 
 zstd is a valid engine but isn't available
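
The new `test-compthreads` case only asserts that bundling succeeds with `experimental.bundlecompthreads=1`; the actual multithreading lives in the bundled python-zstandard. For reference, the underlying library call looks roughly like this (illustrative, outside Mercurial):

import zstandard

# threads > 0 enables zstd's internal multithreading; zstandard also
# accepts threads=-1 to use one thread per logical CPU.
cctx = zstandard.ZstdCompressor(level=3, threads=2)
compressed = cctx.compress(b'some repository data ' * 1000)
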
--- a/tests/test-bundle.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bundle.t	Thu Mar 18 18:24:59 2021 -0400
@@ -733,7 +733,7 @@
 partial history bundle, fails w/ unknown parent
 
   $ hg -R bundle.hg verify
-  abort: 00changelog.i@bbd179dfa0a7: unknown parent
+  abort: 00changelog.i@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
   [50]
 
 full history bundle, refuses to verify non-local repo
--- a/tests/test-bundle2-exchange.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-bundle2-exchange.t	Thu Mar 18 18:24:59 2021 -0400
@@ -638,7 +638,7 @@
   remote: Cleaning up the mess...
   remote: rollback completed
   abort: pretxnclose.failpush hook exited with status 1
-  [255]
+  [40]
 
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
@@ -699,7 +699,7 @@
   remote: Cleaning up the mess...
   remote: rollback completed
   abort: pretxnchangegroup hook exited with status 1
-  [255]
+  [40]
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
   searching for changes
@@ -747,7 +747,7 @@
   Cleaning up the mess...
   rollback completed
   abort: pretxnchangegroup hook exited with status 1
-  [255]
+  [40]
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
   searching for changes
--- a/tests/test-check-code.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-check-code.t	Thu Mar 18 18:24:59 2021 -0400
@@ -11,6 +11,7 @@
   > -X contrib/python-zstandard \
   > -X hgext/fsmonitor/pywatchman \
   > -X mercurial/thirdparty \
+  > -X mercurial/pythoncapi_compat.h \
   > | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
   Skipping contrib/automation/hgautomation/__init__.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/aws.py it has no-che?k-code (glob)
@@ -65,10 +66,10 @@
   COPYING
   Makefile
   README.rst
-  black.toml
   hg
   hgeditor
   hgweb.cgi
+  pyproject.toml
   rustfmt.toml
   setup.py
 
--- a/tests/test-check-format.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-check-format.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1,5 +1,5 @@
 #require black test-repo
 
   $ cd $RUNTESTDIR/..
-  $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
+  $ black --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
 
--- a/tests/test-check-interfaces.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-check-interfaces.py	Thu Mar 18 18:24:59 2021 -0400
@@ -85,6 +85,7 @@
 class dummyrepo(object):
     def __init__(self):
         self.ui = uimod.ui()
+        self._wanted_sidedata = set()
 
     def filtered(self, name):
         pass
@@ -113,6 +114,10 @@
     def close(self):
         pass
 
+    @property
+    def closed(self):
+        pass
+
 
 def main():
     ui = uimod.ui()
@@ -243,7 +248,10 @@
 
     # Conforms to imanifestlog.
     ml = manifest.manifestlog(
-        vfs, repo, manifest.manifestrevlog(repo.svfs), repo.narrowmatch()
+        vfs,
+        repo,
+        manifest.manifestrevlog(repo.nodeconstants, repo.svfs),
+        repo.narrowmatch(),
     )
     checkzobject(ml)
     checkzobject(repo.manifestlog)
@@ -258,7 +266,7 @@
     # Conforms to imanifestdict.
     checkzobject(mctx.read())
 
-    mrl = manifest.manifestrevlog(vfs)
+    mrl = manifest.manifestrevlog(repo.nodeconstants, vfs)
     checkzobject(mrl)
 
     ziverify.verifyClass(repository.irevisiondelta, revlog.revlogrevisiondelta)
@@ -272,6 +280,7 @@
         flags=b'',
         baserevisionsize=None,
         revision=b'',
+        sidedata=b'',
         delta=None,
     )
     checkzobject(rd)
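
The dummy objects above only need to look like the real interfaces: the
zope.interface verification checks that each declared attribute exists, not
that it does anything useful. A toy illustration of why a no-op property is
enough (the class here is hypothetical):

  class dummypeer(object):
      # A read-only property satisfies an "attribute must exist" check
      # even though it always returns None.
      @property
      def closed(self):
          pass

  peer = dummypeer()
  assert hasattr(peer, "closed")
  assert peer.closed is None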
--- a/tests/test-check-module-imports.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-check-module-imports.t	Thu Mar 18 18:24:59 2021 -0400
@@ -14,6 +14,10 @@
 Known-bad files are excluded by -X as some of them would produce unstable
 outputs, which should be fixed later.
 
+NOTE: the `hg locate` command here only works on files that are known to
+Mercurial. If you add an import of a new file and haven't yet `hg add`ed it, you
+will likely receive warnings about a direct import.
+
   $ testrepohg locate 'set:**.py or grep(r"^#!.*?python")' \
   > 'tests/**.t' \
   > -X hgweb.cgi \
--- a/tests/test-churn.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-churn.t	Thu Mar 18 18:24:59 2021 -0400
@@ -195,3 +195,22 @@
   alltogether     11 *********************************************************
 
   $ cd ..
+
+count lines that look like diff headers but are not
+
+  $ hg init not-headers
+  $ cd not-headers
+  $ cat > a <<EOF
+  > diff
+  > @@ -195,3 +195,21 @@
+  > -- a/tests/test-churn.t
+  > ++ b/tests/test-churn.t
+  > EOF
+  $ hg ci -Am adda -u user1
+  adding a
+  $ hg churn --diffstat
+  user1           +4/-0 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+  $ hg rm a
+  $ hg ci -Am removea -u user1
+  $ hg churn --diffstat
+  user1           +4/-4 +++++++++++++++++++++++++++---------------------------
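
The point of the new churn test: file content that merely looks like diff
headers must still be counted as added and removed lines. A simplified,
single-file sketch of a state-aware counter (Mercurial's real parser in
mercurial/patch.py tracks considerably more state):

  def diffstat(lines):
      # "---"/"+++" file headers can only appear before the first "@@"
      # hunk marker; inside a hunk, such lines are ordinary content.
      adds = dels = 0
      inheader = True
      for line in lines:
          if line.startswith("@@ "):
              inheader = False
          elif inheader and line[:4] in ("--- ", "+++ "):
              pass  # real headers, not content
          elif not inheader and line.startswith("+"):
              adds += 1
          elif not inheader and line.startswith("-"):
              dels += 1
      return adds, dels

  sample = [
      "--- a/a",
      "+++ b/a",
      "@@ -0,0 +1,4 @@",
      "+diff",
      "+@@ -195,3 +195,21 @@",
      "+-- a/tests/test-churn.t",
      "+++ b/tests/test-churn.t",
  ]
  assert diffstat(sample) == (4, 0)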
--- a/tests/test-clone-uncompressed.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-clone-uncompressed.t	Thu Mar 18 18:24:59 2021 -0400
@@ -73,7 +73,6 @@
     remote-changegroup
       http
       https
-    rev-branch-cache
 
   $ hg clone --stream -U http://localhost:$HGPORT server-disabled
   warning: stream clone requested but server has them disabled
@@ -141,7 +140,6 @@
     remote-changegroup
       http
       https
-    rev-branch-cache
 
   $ hg clone --stream -U http://localhost:$HGPORT server-disabled
   warning: stream clone requested but server has them disabled
--- a/tests/test-clonebundles.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-clonebundles.t	Thu Mar 18 18:24:59 2021 -0400
@@ -589,9 +589,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
-  bundle2-input-part: total payload size 59
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 3 parts total
   checking for updated bookmarks
   updating the branch cache
   added 2 changesets with 2 changes to 2 files
--- a/tests/test-commandserver.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-commandserver.t	Thu Mar 18 18:24:59 2021 -0400
@@ -522,7 +522,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status 1
-   [255]
+   [40]
   *** runcommand verify
   checking changesets
   checking manifests
@@ -1013,7 +1013,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status 1
-   [255]
+   [40]
   *** runcommand log
   *** runcommand verify -q
 
@@ -1057,7 +1057,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status 1
-   [255]
+   [40]
   *** runcommand log
   0 bar (bar)
   *** runcommand verify -q
--- a/tests/test-commit-amend.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-commit-amend.t	Thu Mar 18 18:24:59 2021 -0400
@@ -209,7 +209,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.test-saving-last-message hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   message given from command line (no-eol)
 
@@ -234,7 +234,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.test-saving-last-message hook exited with status 1
-  [255]
+  [40]
 
   $ cat .hg/last-message.txt
   another precious commit message
--- a/tests/test-completion.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-completion.t	Thu Mar 18 18:24:59 2021 -0400
@@ -38,6 +38,7 @@
   paths
   phase
   pull
+  purge
   push
   recover
   remove
@@ -129,6 +130,7 @@
   debugrevspec
   debugserve
   debugsetparents
+  debugshell
   debugsidedata
   debugssl
   debugstrip
@@ -270,7 +272,7 @@
   debugbuilddag: mergeable-file, overwritten-file, new-file
   debugbundle: all, part-type, spec
   debugcapabilities: 
-  debugchangedfiles: 
+  debugchangedfiles: compute
   debugcheckstate: 
   debugcolor: style
   debugcommands: 
@@ -281,7 +283,7 @@
   debugdate: extended
   debugdeltachain: changelog, manifest, dir, template
   debugdirstate: nodates, dates, datesort
-  debugdiscovery: old, nonheads, rev, seed, ssh, remotecmd, insecure
+  debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
   debugdownload: output
   debugextensions: template
   debugfileset: rev, all-files, show-matcher, show-stage
@@ -318,6 +320,7 @@
   debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
   debugserve: sshstdio, logiofd, logiofile
   debugsetparents: 
+  debugshell: 
   debugsidedata: changelog, manifest, dir
   debugssl: 
   debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
@@ -354,6 +357,7 @@
   paths: template
   phase: public, draft, secret, force, rev
   pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
+  purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
   push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
   recover: verify
   remove: after, force, subrepos, include, exclude, dry-run
--- a/tests/test-config.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-config.t	Thu Mar 18 18:24:59 2021 -0400
@@ -3,6 +3,8 @@
 
 Invalid syntax: no value
 
+TODO: add rhg support for detailed exit codes
+#if no-rhg
   $ cat > .hg/hgrc << EOF
   > novaluekey
   > EOF
@@ -35,6 +37,7 @@
   $ hg showconfig
   config error at $TESTTMP/.hg/hgrc:1: unexpected leading whitespace:  [section]
   [30]
+#endif
 
 Reset hgrc
 
@@ -388,3 +391,114 @@
   > done
   $ HGRCPATH=configs hg config section.key
   99
+
+Configuration priority
+======================
+
+setup the necessary files
+
+  $ cat > file-A.rc << EOF
+  > [config-test]
+  > basic = value-A
+  > pre-include= value-A
+  > %include ./included.rc
+  > post-include= value-A
+  > [command-templates]
+  > log = "value-A\n"
+  > EOF
+
+  $ cat > file-B.rc << EOF
+  > [config-test]
+  > basic = value-B
+  > [ui]
+  > logtemplate = "value-B\n"
+  > EOF
+
+
+  $ cat > included.rc << EOF
+  > [config-test]
+  > pre-include= value-included
+  > post-include= value-included
+  > EOF
+
+  $ cat > file-C.rc << EOF
+  > %include ./included-alias-C.rc
+  > [ui]
+  > logtemplate = "value-C\n"
+  > EOF
+
+  $ cat > included-alias-C.rc << EOF
+  > [command-templates]
+  > log = "value-included\n"
+  > EOF
+
+
+  $ cat > file-D.rc << EOF
+  > [command-templates]
+  > log = "value-D\n"
+  > %include ./included-alias-D.rc
+  > EOF
+
+  $ cat > included-alias-D.rc << EOF
+  > [ui]
+  > logtemplate = "value-included\n"
+  > EOF
+
+Simple order checking
+---------------------
+
+If file B is read after file A, values from B overwrite values from A.
+
+  $ HGRCPATH="file-A.rc:file-B.rc" hg config config-test.basic
+  value-B
+
+Ordering from include
+---------------------
+
+values from an include overwrite values defined before the include, but not those defined after it
+
+  $ HGRCPATH="file-A.rc" hg config config-test.pre-include
+  value-included
+  $ HGRCPATH="file-A.rc" hg config config-test.post-include
+  value-A
+
+command line override
+---------------------
+
+  $ HGRCPATH="file-A.rc:file-B.rc" hg config config-test.basic --config config-test.basic=value-CLI
+  value-CLI
+
+Alias ordering
+--------------
+
+The official config is now `command-templates.log`; the historical
+`ui.logtemplate` remains a valid alternative for it.
+
+When both are defined, the config value read last "wins". This should remain
+true if the config gains other aliases. In other words, the config value read
+earlier is considered "lower level" and the one read later "higher level",
+and higher-level values win.
+
+  $ HGRCPATH="file-A.rc" hg log -r .
+  value-A
+  $ HGRCPATH="file-B.rc" hg log -r .
+  value-B
+  $ HGRCPATH="file-A.rc:file-B.rc" hg log -r .
+  value-B
+
+Alias and include
+-----------------
+
+The pre/post-include priority should also apply when tie-breaking between aliases.
+See the case above for details about the two config options used.
+
+  $ HGRCPATH="file-C.rc" hg log -r .
+  value-C
+  $ HGRCPATH="file-D.rc" hg log -r .
+  value-included
+
+command line override
+---------------------
+
+  $ HGRCPATH="file-A.rc:file-B.rc" hg log -r . --config ui.logtemplate="value-CLI\n"
+  value-CLI
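
A compact model of the precedence rules these tests pin down: files are read
in HGRCPATH order, an %include is expanded at the point where it appears, and
the value read last wins - which is also why --config, read after every file,
wins overall. The load helper below is purely illustrative:

  def load(files, toread):
      # files maps a file name to (key, value) pairs in file order, with
      # ("%include", name) entries expanded in place; later reads
      # overwrite earlier ones ("last one wins").
      result = {}

      def process(name):
          for key, value in files[name]:
              if key == "%include":
                  process(value)
              else:
                  result[key] = value

      for name in toread:
          process(name)
      return result

  files = {
      "file-A.rc": [
          ("pre-include", "value-A"),
          ("%include", "included.rc"),
          ("post-include", "value-A"),
      ],
      "included.rc": [
          ("pre-include", "value-included"),
          ("post-include", "value-included"),
      ],
  }
  cfg = load(files, ["file-A.rc"])
  assert cfg["pre-include"] == "value-included"  # include read after it
  assert cfg["post-include"] == "value-A"        # defined after the include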
--- a/tests/test-contrib-perf.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-contrib-perf.t	Thu Mar 18 18:24:59 2021 -0400
@@ -78,111 +78,137 @@
   
   list of commands:
   
-   perfaddremove
+   perf::addremove
+                 (no help text available)
+   perf::ancestors
                  (no help text available)
-   perfancestors
+   perf::ancestorset
                  (no help text available)
-   perfancestorset
+   perf::annotate
                  (no help text available)
-   perfannotate  (no help text available)
-   perfbdiff     benchmark a bdiff between revisions
-   perfbookmarks
+   perf::bdiff   benchmark a bdiff between revisions
+   perf::bookmarks
                  benchmark parsing bookmarks from disk to memory
-   perfbranchmap
+   perf::branchmap
                  benchmark the update of a branchmap
-   perfbranchmapload
+   perf::branchmapload
                  benchmark reading the branchmap
-   perfbranchmapupdate
+   perf::branchmapupdate
                  benchmark branchmap update from for <base> revs to <target>
                  revs
-   perfbundleread
+   perf::bundleread
                  Benchmark reading of bundle files.
-   perfcca       (no help text available)
-   perfchangegroupchangelog
+   perf::cca     (no help text available)
+   perf::changegroupchangelog
                  Benchmark producing a changelog group for a changegroup.
-   perfchangeset
+   perf::changeset
+                 (no help text available)
+   perf::ctxfiles
                  (no help text available)
-   perfctxfiles  (no help text available)
-   perfdiffwd    Profile diff of working directory changes
-   perfdirfoldmap
+   perf::diffwd  Profile diff of working directory changes
+   perf::dirfoldmap
                  benchmap a 'dirstate._map.dirfoldmap.get()' request
-   perfdirs      (no help text available)
-   perfdirstate  benchmap the time of various distate operations
-   perfdirstatedirs
+   perf::dirs    (no help text available)
+   perf::dirstate
+                 benchmap the time of various distate operations
+   perf::dirstatedirs
                  benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
-   perfdirstatefoldmap
+   perf::dirstatefoldmap
                  benchmap a 'dirstate._map.filefoldmap.get()' request
-   perfdirstatewrite
+   perf::dirstatewrite
                  benchmap the time it take to write a dirstate on disk
-   perfdiscovery
+   perf::discovery
                  benchmark discovery between local repo and the peer at given
                  path
-   perffncacheencode
+   perf::fncacheencode
                  (no help text available)
-   perffncacheload
+   perf::fncacheload
                  (no help text available)
-   perffncachewrite
+   perf::fncachewrite
                  (no help text available)
-   perfheads     benchmark the computation of a changelog heads
-   perfhelper-mergecopies
+   perf::heads   benchmark the computation of a changelog heads
+   perf::helper-mergecopies
                  find statistics about potential parameters for
                  'perfmergecopies'
-   perfhelper-pathcopies
+   perf::helper-pathcopies
                  find statistic about potential parameters for the
                  'perftracecopies'
-   perfignore    benchmark operation related to computing ignore
-   perfindex     benchmark index creation time followed by a lookup
-   perflinelogedits
+   perf::ignore  benchmark operation related to computing ignore
+   perf::index   benchmark index creation time followed by a lookup
+   perf::linelogedits
                  (no help text available)
-   perfloadmarkers
+   perf::loadmarkers
                  benchmark the time to parse the on-disk markers for a repo
-   perflog       (no help text available)
-   perflookup    (no help text available)
-   perflrucachedict
+   perf::log     (no help text available)
+   perf::lookup  (no help text available)
+   perf::lrucachedict
                  (no help text available)
-   perfmanifest  benchmark the time to read a manifest from disk and return a
+   perf::manifest
+                 benchmark the time to read a manifest from disk and return a
                  usable
-   perfmergecalculate
+   perf::mergecalculate
                  (no help text available)
-   perfmergecopies
+   perf::mergecopies
                  measure runtime of 'copies.mergecopies'
-   perfmoonwalk  benchmark walking the changelog backwards
-   perfnodelookup
+   perf::moonwalk
+                 benchmark walking the changelog backwards
+   perf::nodelookup
                  (no help text available)
-   perfnodemap   benchmark the time necessary to look up revision from a cold
+   perf::nodemap
+                 benchmark the time necessary to look up revision from a cold
                  nodemap
-   perfparents   benchmark the time necessary to fetch one changeset's parents.
-   perfpathcopies
+   perf::parents
+                 benchmark the time necessary to fetch one changeset's parents.
+   perf::pathcopies
                  benchmark the copy tracing logic
-   perfphases    benchmark phasesets computation
-   perfphasesremote
+   perf::phases  benchmark phasesets computation
+   perf::phasesremote
                  benchmark time needed to analyse phases of the remote server
-   perfprogress  printing of progress bars
-   perfrawfiles  (no help text available)
-   perfrevlogchunks
+   perf::progress
+                 printing of progress bars
+   perf::rawfiles
+                 (no help text available)
+   perf::revlogchunks
                  Benchmark operations on revlog chunks.
-   perfrevlogindex
+   perf::revlogindex
                  Benchmark operations against a revlog index.
-   perfrevlogrevision
+   perf::revlogrevision
                  Benchmark obtaining a revlog revision.
-   perfrevlogrevisions
+   perf::revlogrevisions
                  Benchmark reading a series of revisions from a revlog.
-   perfrevlogwrite
+   perf::revlogwrite
                  Benchmark writing a series of revisions to a revlog.
-   perfrevrange  (no help text available)
-   perfrevset    benchmark the execution time of a revset
-   perfstartup   (no help text available)
-   perfstatus    benchmark the performance of a single status call
-   perftags      (no help text available)
-   perftemplating
+   perf::revrange
+                 (no help text available)
+   perf::revset  benchmark the execution time of a revset
+   perf::startup
+                 (no help text available)
+   perf::status  benchmark the performance of a single status call
+   perf::tags    (no help text available)
+   perf::templating
                  test the rendering time of a given template
-   perfunidiff   benchmark a unified diff between revisions
-   perfvolatilesets
+   perf::unidiff
+                 benchmark a unified diff between revisions
+   perf::volatilesets
                  benchmark the computation of various volatile set
-   perfwalk      (no help text available)
-   perfwrite     microbenchmark ui.write (and others)
+   perf::walk    (no help text available)
+   perf::write   microbenchmark ui.write (and others)
   
   (use 'hg help -v perf' to show built-in aliases and global options)
+
+  $ hg help perfaddremove
+  hg perf::addremove
+  
+  aliases: perfaddremove
+  
+  (no help text available)
+  
+  options:
+  
+   -T --template TEMPLATE display with template
+  
+  (some details hidden, use --verbose to show complete help)
+
   $ hg perfaddremove
   $ hg perfancestors
   $ hg perfancestorset 2
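
The perf commands move into a perf:: namespace while the old flat names
survive as aliases (see the `hg help perfaddremove` output above). The
mapping between the two forms is mechanical:

  # Old flat names are the namespaced names with the "::" dropped.
  def flatalias(name):
      assert name.startswith("perf::")
      return name.replace("perf::", "perf", 1)

  assert flatalias("perf::addremove") == "perfaddremove"
  assert flatalias("perf::revlogwrite") == "perfrevlogwrite"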
--- a/tests/test-convert-filemap.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-convert-filemap.t	Thu Mar 18 18:24:59 2021 -0400
@@ -292,12 +292,12 @@
   $ rm -rf source/.hg/store/data/dir/file4
 #endif
   $ hg -q convert --filemap renames.fmap --datesort source dummydest
-  abort: data/dir/file3.i@e96dce0bc6a2: no match found (reporevlogstore !)
+  abort: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
   abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   [50]
   $ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
-  ignoring: data/dir/file3.i@e96dce0bc6a2: no match found (reporevlogstore !)
-  ignoring: data/dir/file4.i@6edd55f559cd: no match found (reporevlogstore !)
+  ignoring: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  ignoring: data/dir/file4.i@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
   ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
   $ hg up -q -R renames.repo
--- a/tests/test-convert-hg-source.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-convert-hg-source.t	Thu Mar 18 18:24:59 2021 -0400
@@ -182,7 +182,7 @@
   sorting...
   converting...
   4 init
-  ignoring: data/b.i@1e88685f5dde: no match found (reporevlogstore !)
+  ignoring: data/b.i@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
   ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
   3 changeall
   2 changebagain
--- a/tests/test-convert-svn-sink.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-convert-svn-sink.t	Thu Mar 18 18:24:59 2021 -0400
@@ -54,10 +54,12 @@
    2 2 test a
   revision: 2
   author: test
+  date: * (glob)
   msg: modify a file
    M /a
   revision: 1
   author: test
+  date: * (glob)
   msg: add a file
    A /a
    A /d1
@@ -95,6 +97,7 @@
    3 3 test b
   revision: 3
   author: test
+  date: * (glob)
   msg: rename a file
    D /a
    A /b (from /a@2)
@@ -131,6 +134,7 @@
    4 4 test c
   revision: 4
   author: test
+  date: * (glob)
   msg: copy a file
    A /c (from /b@3)
   $ ls a a-hg-wc
@@ -167,6 +171,7 @@
    5 5 test .
   revision: 5
   author: test
+  date: * (glob)
   msg: remove a file
    D /b
   $ ls a a-hg-wc
@@ -209,6 +214,7 @@
    6 6 test c
   revision: 6
   author: test
+  date: * (glob)
   msg: make a file executable
    M /c
 #if execbit
@@ -247,6 +253,7 @@
    8 8 test newlink
   revision: 8
   author: test
+  date: * (glob)
   msg: move symlink
    D /link
    A /newlink (from /link@7)
@@ -278,6 +285,7 @@
    7 7 test f
   revision: 7
   author: test
+  date: * (glob)
   msg: f
    D /c
    A /d
@@ -315,6 +323,7 @@
    1 1 test d1/a
   revision: 1
   author: test
+  date: * (glob)
   msg: add executable file in new directory
    A /d1
    A /d1/a
@@ -343,6 +352,7 @@
    2 2 test d2/a
   revision: 2
   author: test
+  date: * (glob)
   msg: copy file to new directory
    A /d2
    A /d2/a (from /d1/a@1)
@@ -416,21 +426,25 @@
    4 4 test right-2
   revision: 4
   author: test
+  date: * (glob)
   msg: merge
    A /right-1
    A /right-2
   revision: 3
   author: test
+  date: * (glob)
   msg: left-2
    M /b
    A /left-2
   revision: 2
   author: test
+  date: * (glob)
   msg: left-1
    M /b
    A /left-1
   revision: 1
   author: test
+  date: * (glob)
   msg: base
    A /b
 
@@ -459,10 +473,12 @@
    2 2 test .hgtags
   revision: 2
   author: test
+  date: * (glob)
   msg: Tagged as v1.0
    A /.hgtags
   revision: 1
   author: test
+  date: * (glob)
   msg: Add file a
    A /a
   $ rm -rf a a-hg a-hg-wc
@@ -494,10 +510,12 @@
    2 2 test exec
   revision: 2
   author: test
+  date: * (glob)
   msg: remove executable bit
    M /exec
   revision: 1
   author: test
+  date: * (glob)
   msg: create executable
    A /exec
   $ test ! -x a-hg-wc/exec
@@ -540,11 +558,77 @@
    2 2 test b
   revision: 2
   author: test
+  date: * (glob)
   msg: Another change
    A /b
   revision: 1
   author: test
+  date: * (glob)
   msg: Some change
    A /a
 
   $ rm -rf a a-hg a-hg-wc
+
+Commit date conversion
+
+  $ hg init a
+
+  $ echo a >> a/a
+  $ hg add a
+  adding a/a
+  $ hg --cwd a ci -d '1 0' -A -m 'Change 1'
+
+  $ echo a >> a/a
+  $ hg --cwd a ci -d '2 0' -m 'Change 2'
+
+  $ echo a >> a/a
+  $ hg --cwd a ci -d '2 0' -m 'Change at the same time'
+
+  $ echo a >> a/a
+  $ hg --cwd a ci -d '1 0' -m 'Change in the past'
+
+  $ echo a >> a/a
+  $ hg --cwd a ci -d '3 0' -m 'Change in the future'
+
+  $ hg convert --config convert.svn.dangerous-set-commit-dates=true -d svn a
+  assuming destination a-hg
+  initializing svn repository 'a-hg'
+  initializing svn working copy 'a-hg-wc'
+  scanning source...
+  sorting...
+  converting...
+  4 Change 1
+  3 Change 2
+  2 Change at the same time
+  1 Change in the past
+  0 Change in the future
+  $ svnupanddisplay a-hg-wc 0
+   5 5 test .
+   5 5 test a
+  revision: 5
+  author: test
+  date: 1970-01-01T00:00:03.000000Z
+  msg: Change in the future
+   M /a
+  revision: 4
+  author: test
+  date: 1970-01-01T00:00:01.000000Z
+  msg: Change in the past
+   M /a
+  revision: 3
+  author: test
+  date: 1970-01-01T00:00:02.000000Z
+  msg: Change at the same time
+   M /a
+  revision: 2
+  author: test
+  date: 1970-01-01T00:00:02.000000Z
+  msg: Change 2
+   M /a
+  revision: 1
+  author: test
+  date: 1970-01-01T00:00:01.000000Z
+  msg: Change 1
+   A /a
+
+  $ rm -rf a a-hg a-hg-wc
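
The svn:date values checked above are UTC ISO-8601 timestamps with
microsecond precision. A sketch of the conversion from Mercurial's
(unixtime, tzoffset) date pairs (the helper name is illustrative):

  import datetime

  def svndate(hgdate):
      # hg stores (unixtime, offset); svn:date wants the UTC instant.
      unixtime, _offset = hgdate
      dt = datetime.datetime.utcfromtimestamp(unixtime)
      return dt.strftime("%Y-%m-%dT%H:%M:%S.%fZ")

  assert svndate((3, 0)) == "1970-01-01T00:00:03.000000Z"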
--- a/tests/test-convert.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-convert.t	Thu Mar 18 18:24:59 2021 -0400
@@ -388,6 +388,23 @@
                     does not convert tags from the source repo to the target
                     repo. The default is False.
   
+      Subversion Destination
+      ######################
+  
+      Original commit dates are not preserved by default.
+  
+      convert.svn.dangerous-set-commit-dates
+                    preserve original commit dates, forcefully setting
+                    "svn:date" revision properties. This option is DANGEROUS and
+                    may break some subversion functionality for the resulting
+                    repository (e.g. filtering revisions with date ranges in
+                    "svn log"), as original commit dates are not guaranteed to
+                    be monotonically increasing.
+  
+      For commit dates setting to work destination repository must have "pre-
+      revprop-change" hook configured to allow setting of "svn:date" revision
+      properties. See Subversion documentation for more details.
+  
   options ([+] can be repeated):
   
    -s --source-type TYPE source repository type
--- a/tests/test-copies-chain-merge.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-copies-chain-merge.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset sidedata upgraded
+#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel
 
 =====================================================
 Test Copy tracing for chain of copies involving merge
@@ -14,11 +14,24 @@
 
 use git diff to see rename
 
+  $ cat << EOF >> ./no-linkrev
+  > #!$PYTHON
+  > # filter out the linkrev column of `hg debugindex` output
+  > import sys
+  > for line in sys.stdin:
+  >     if " linkrev " in line:
+  >         # header line: keep it as-is
+  >         print(line.rstrip())
+  >     else:
+  >         # blank out the 8-character linkrev field (keeping the rev
+  >         # column) and mark it with a '*'
+  >         l = "%s       *%s" % (line[:6], line[14:].rstrip())
+  >         print(l)
+  > EOF
+  $ chmod +x no-linkrev
+
   $ cat << EOF >> $HGRCPATH
   > [diff]
   > git=yes
   > [command-templates]
-  > log={rev} {desc}\n
+  > log={desc}\n
   > EOF
 
 #if compatibility
@@ -45,28 +58,45 @@
 #endif
 
 
+  $ cat > same-content.txt << EOF
+  > Here is some content that will be the same accros multiple file.
+  > 
+  > This is done on purpose so that we end up in some merge situation, were the
+  > resulting content is the same as in the parent(s), but a new filenodes still
+  > need to be created to record some file history information (especially
+  > about copies).
+  > EOF
+
   $ hg init repo-chain
   $ cd repo-chain
 
 Add some linear renames initially
 
-  $ echo a > a
-  $ echo b > b
-  $ echo h > h
-  $ hg ci -Am 'i-0 initial commit: a b h'
+  $ cp ../same-content.txt a
+  $ cp ../same-content.txt b
+  $ cp ../same-content.txt h
+  $ echo "original content for P" > p
+  $ echo "original content for Q" > q
+  $ echo "original content for R" > r
+  $ hg ci -Am 'i-0 initial commit: a b h p q r'
   adding a
   adding b
   adding h
+  adding p
+  adding q
+  adding r
   $ hg mv a c
-  $ hg ci -Am 'i-1: a -move-> c'
+  $ hg mv p s
+  $ hg ci -Am 'i-1: a -move-> c, p -move-> s'
   $ hg mv c d
-  $ hg ci -Am 'i-2: c -move-> d'
+  $ hg mv s t
+  $ hg ci -Am 'i-2: c -move-> d, s -move-> t'
   $ hg log -G
-  @  2 i-2: c -move-> d
+  @  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 And having another branch with renames on the other side
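
The same-content.txt setup above works because a revlog nodeid hashes the
parent nodes together with the revision text (sha1 over the two parents,
smaller one first, then the text), so identical content reached through
different histories still gets distinct filenodes. A self-contained sketch of
that scheme:

  import hashlib

  def nodeid(text, p1, p2):
      # Revlog-style hash: sha1(min(p1, p2) + max(p1, p2) + text).
      s = hashlib.sha1()
      s.update(min(p1, p2))
      s.update(max(p1, p2))
      s.update(text)
      return s.hexdigest()

  nullid = b"\x00" * 20
  text = b"identical file content\n"
  root = nodeid(text, nullid, nullid)
  child = nodeid(text, bytes.fromhex(root), nullid)
  assert root != child  # same text, different parents => new filenode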
@@ -76,15 +106,15 @@
   $ hg mv e f
   $ hg ci -Am 'a-2: e -move-> f'
   $ hg log -G --rev '::.'
-  @  4 a-2: e -move-> f
+  @  a-2: e -move-> f
   |
-  o  3 a-1: d -move-> e
+  o  a-1: d -move-> e
   |
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Have a branching with nothing on one side
@@ -95,13 +125,13 @@
   $ hg ci -m 'b-1: b update'
   created new head
   $ hg log -G --rev '::.'
-  @  5 b-1: b update
+  @  b-1: b update
   |
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Create a branch that delete a file previous renamed
@@ -112,13 +142,13 @@
   $ hg ci -m 'c-1 delete d'
   created new head
   $ hg log -G --rev '::.'
-  @  6 c-1 delete d
+  @  c-1 delete d
   |
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Create a branch that delete a file previous renamed and recreate it
@@ -132,15 +162,15 @@
   $ hg add d
   $ hg ci -m 'd-2 re-add d'
   $ hg log -G --rev '::.'
-  @  8 d-2 re-add d
+  @  d-2 re-add d
   |
-  o  7 d-1 delete d
+  o  d-1 delete d
   |
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Having another branch renaming a different file to the same filename as another
@@ -153,16 +183,61 @@
   $ hg mv g f
   $ hg ci -m 'e-2 g -move-> f'
   $ hg log -G --rev '::.'
-  @  10 e-2 g -move-> f
+  @  e-2 g -move-> f
+  |
+  o  e-1 b -move-> g
+  |
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  9 e-1 b -move-> g
+  o  i-0 initial commit: a b h p q r
+  
+  $ hg up -q null
+
+Having a branch similar to the 'a' one, but moving the 'p' file around.
+
+  $ hg up 'desc("i-2")'
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg mv t u
+  $ hg ci -Am 'p-1: t -move-> u'
+  created new head
+  $ hg mv u v
+  $ hg ci -Am 'p-2: u -move-> v'
+  $ hg log -G --rev '::.'
+  @  p-2: u -move-> v
+  |
+  o  p-1: t -move-> u
+  |
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  2 i-2: c -move-> d
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  1 i-1: a -move-> c
+  o  i-0 initial commit: a b h p q r
+  
+  $ hg up -q null
+
+Having another branch renaming a different file to the same filename as another
+
+  $ hg up 'desc("i-2")'
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg mv r w
+  $ hg ci -m 'q-1 r -move-> w'
+  created new head
+  $ hg mv w v
+  $ hg ci -m 'q-2 w -move-> v'
+  $ hg log -G --rev '::.'
+  @  q-2 w -move-> v
   |
-  o  0 i-0 initial commit: a b h
+  o  q-1 r -move-> w
+  |
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
   
+  $ hg up -q null
 
 Setup all merge
 ===============
@@ -176,35 +251,37 @@
 - rename on one side
 - unrelated change on the other side
 
+  $ case_desc="simple merge - A side: multiple renames, B side: unrelated update"
+
   $ hg up 'desc("b-1")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("a-2")'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mBAm-0 simple merge - one way'
+  $ hg ci -m "mBAm-0 $case_desc - one way"
   $ hg up 'desc("a-2")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mABm-0 simple merge - the other way'
+  $ hg ci -m "mABm-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
-  @    12 mABm-0 simple merge - the other way
+  @    mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
   |\
-  +---o  11 mBAm-0 simple merge - one way
+  +---o  mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
   | |/
-  | o  5 b-1: b update
+  | o  b-1: b update
   | |
-  o |  4 a-2: e -move-> f
+  o |  a-2: e -move-> f
   | |
-  o |  3 a-1: d -move-> e
+  o |  a-1: d -move-> e
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 
@@ -216,12 +293,14 @@
 - one deleting the change
 and recreate an unrelated file after the merge
 
+  $ case_desc="simple merge - C side: delete a file with copies history , B side: unrelated update"
+
   $ hg up 'desc("b-1")'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg merge 'desc("c-1")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mBCm-0 simple merge - one way'
+  $ hg ci -m "mBCm-0 $case_desc - one way"
   $ echo bar > d
   $ hg add d
   $ hg ci -m 'mBCm-1 re-add d'
@@ -230,29 +309,29 @@
   $ hg merge 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mCBm-0 simple merge - the other way'
+  $ hg ci -m "mCBm-0 $case_desc - the other way"
   created new head
   $ echo bar > d
   $ hg add d
   $ hg ci -m 'mCBm-1 re-add d'
   $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
-  @  16 mCBm-1 re-add d
+  @  mCBm-1 re-add d
   |
-  o    15 mCBm-0 simple merge - the other way
+  o    mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
   |\
-  | | o  14 mBCm-1 re-add d
+  | | o  mBCm-1 re-add d
   | | |
-  +---o  13 mBCm-0 simple merge - one way
+  +---o  mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
   | |/
-  | o  6 c-1 delete d
+  | o  c-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Comparing with a merge re-adding the file afterward
@@ -262,84 +341,139 @@
 - one with change to an unrelated file
 - one deleting and recreating the change
 
+  $ case_desc="simple merge - B side: unrelated update, D side: delete and recreate a file (with different content)"
+
   $ hg up 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("d-2")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mBDm-0 simple merge - one way'
+  $ hg ci -m "mBDm-0 $case_desc - one way"
   $ hg up 'desc("d-2")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mDBm-0 simple merge - the other way'
+  $ hg ci -m "mDBm-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
-  @    18 mDBm-0 simple merge - the other way
+  @    mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
   |\
-  +---o  17 mBDm-0 simple merge - one way
+  +---o  mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
   | |/
-  | o  8 d-2 re-add d
+  | o  d-2 re-add d
   | |
-  | o  7 d-1 delete d
+  | o  d-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 
 Comparing with a merge with colliding rename
 --------------------------------------------
 
+Subcase: new copy information on both sides
+````````````````````````````````````````````
+
 - the "e-" branch renaming b to f (through 'g')
 - the "a-" branch renaming d to f (through e)
 
+  $ case_desc="merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f)"
+
   $ hg up 'desc("a-2")'
   2 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ hg merge 'desc("e-2")' --tool :union
-  merging f
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("e-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mAEm-0 simple merge - one way'
+  $ hg ci -m "mAEm-0 $case_desc - one way"
   $ hg up 'desc("e-2")'
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg merge 'desc("a-2")' --tool :union
-  merging f
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("a-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mEAm-0 simple merge - the other way'
+  $ hg ci -m "mEAm-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
-  @    20 mEAm-0 simple merge - the other way
+  @    mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
   |\
-  +---o  19 mAEm-0 simple merge - one way
+  +---o  mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
   | |/
-  | o  10 e-2 g -move-> f
+  | o  e-2 g -move-> f
   | |
-  | o  9 e-1 b -move-> g
+  | o  e-1 b -move-> g
   | |
-  o |  4 a-2: e -move-> f
+  o |  a-2: e -move-> f
   | |
-  o |  3 a-1: d -move-> e
+  o |  a-1: d -move-> e
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  1 i-1: a -move-> c
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: new copy information on both sides with an actual merge happening
+```````````````````````````````````````````````````````````````````````````
+
+- the "p-" branch renaming 't' to 'v' (through 'u')
+- the "q-" branch renaming 'r' to 'v' (through 'w')
+
+  $ case_desc="merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content)"
+
+  $ hg up 'desc("p-2")'
+  3 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ hg merge 'desc("q-2")' --tool ':union'
+  merging v
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mPQm-0 $case_desc - one way"
+  $ hg up 'desc("q-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("p-2")' --tool ':union'
+  merging v
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mQPm-0 $case_desc - the other way"
+  created new head
+  $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
+  o    mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+  |\
+  +---o  mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+  | |/
+  | o  e-2 g -move-> f
+  | |
+  | o  e-1 b -move-> g
+  | |
+  o |  a-2: e -move-> f
+  | |
+  o |  a-1: d -move-> e
+  |/
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  0 i-0 initial commit: a b h
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
   
 
+Subcase: existing copy information overwritten on one branch
+````````````````````````````````````````````````````````````
 
 Merge:
 - one with change to an unrelated file (b)
 - one overwriting a file (d) with a rename (from h to i to d)
 
+  $ case_desc="simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d)"
+
   $ hg up 'desc("i-2")'
   2 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg mv h i
@@ -347,45 +481,104 @@
   created new head
   $ hg mv --force i d
   $ hg commit -m "f-2: rename i -> d"
-  $ hg debugindex d
+  $ hg debugindex d | ../no-linkrev
      rev linkrev nodeid       p1           p2
-       0       2 169be882533b 000000000000 000000000000 (no-changeset !)
-       0       2 b789fdd96dc2 000000000000 000000000000 (changeset !)
-       1       8 b004912a8510 000000000000 000000000000
-       2      22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
-       2      22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
+       0       * d8252ab2e760 000000000000 000000000000 (no-changeset !)
+       0       * ae258f702dfe 000000000000 000000000000 (changeset !)
+       1       * b004912a8510 000000000000 000000000000
+       2       * 7b79e2fe0c89 000000000000 000000000000 (no-changeset !)
   $ hg up 'desc("b-1")'
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
   $ hg merge 'desc("f-2")'
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mBFm-0 simple merge - one way'
+  $ hg ci -m "mBFm-0 $case_desc - one way"
   $ hg up 'desc("f-2")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mFBm-0 simple merge - the other way'
+  $ hg ci -m "mFBm-0 $case_desc - the other way"
   created new head
+  $ hg up null --quiet
   $ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
-  @    24 mFBm-0 simple merge - the other way
+  o    mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
   |\
-  +---o  23 mBFm-0 simple merge - one way
+  +---o  mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
   | |/
-  | o  22 f-2: rename i -> d
+  | o  f-2: rename i -> d
+  | |
+  | o  f-1: rename h -> i
   | |
-  | o  21 f-1: rename h -> i
-  | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  1 i-1: a -move-> c
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: existing copy information overwritten on one branch, with different content
+`````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (t) with a rename (from r to x to t); t's content is not the same as on the other branch
+
+  $ case_desc="simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content"
+
+  $ hg up 'desc("i-2")'
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg mv r x
+  $ hg commit -m "r-1: rename r -> x"
+  created new head
+  $ hg mv --force x t
+  $ hg commit -m "r-2: rename t -> x"
+  $ hg debugindex t | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * d74efbf65309 000000000000 000000000000 (no-changeset !)
+       1       * 02a930b9d7ad 000000000000 000000000000 (no-changeset !)
+       0       * 5aed6a8dbff0 000000000000 000000000000 (changeset !)
+       1       * a38b2fa17021 000000000000 000000000000 (changeset !)
+  $ hg up 'desc("b-1")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("r-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mBRm-0 $case_desc - one way"
+  $ hg up 'desc("r-2")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mRBm-0 $case_desc - the other way"
+  created new head
+  $ hg up null --quiet
+  $ hg log -G --rev '::(desc("mBRm")+desc("mRBm"))'
+  o    mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+  |\
+  +---o  mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+  | |/
+  | o  r-2: rename t -> x
+  | |
+  | o  r-1: rename r -> x
+  | |
+  o |  b-1: b update
+  |/
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  0 i-0 initial commit: a b h
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
   
 
 
+Subcase: reset of the copy history on one side
+``````````````````````````````````````````````
+
 Merge:
 - one with change to a file
 - one deleting and recreating the file
@@ -393,8 +586,10 @@
 Unlike in the 'BD/DB' cases, an actual merge happened here, so we should
 consider history and renames on both branches of the merge.
 
+  $ case_desc="actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content"
+
   $ hg up 'desc("i-2")'
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo "some update" >> d
   $ hg commit -m "g-1: update d"
   created new head
@@ -404,33 +599,35 @@
   merging d
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mDGm-0 simple merge - one way'
+  $ hg ci -m "mDGm-0 $case_desc - one way"
   $ hg up 'desc("g-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg merge 'desc("d-2")' --tool :union
   merging d
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mGDm-0 simple merge - the other way'
+  $ hg ci -m "mGDm-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
-  @    27 mGDm-0 simple merge - the other way
+  @    mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
   |\
-  +---o  26 mDGm-0 simple merge - one way
+  +---o  mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   | |
-  o |  7 d-1 delete d
+  o |  d-1 delete d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  1 i-1: a -move-> c
-  |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
+Subcase: merging a change to a file with a "copy overwrite" to that file from another branch
+````````````````````````````````````````````````````````````````````````````````````````````
 
 Merge:
 - one with change to a file (d)
@@ -445,38 +642,43 @@
 |
 | The current code arbitrarily pick one side
 
+  $ case_desc="merge - G side: content change, F side: copy overwrite, no content change"
+
   $ hg up 'desc("f-2")'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg merge 'desc("g-1")' --tool :union
-  merging d
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  merging d (no-changeset !)
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mFGm-0 simple merge - one way'
+  $ hg ci -m "mFGm-0 $case_desc - one way"
   created new head
   $ hg up 'desc("g-1")'
-  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
   $ hg merge 'desc("f-2")' --tool :union
-  merging d
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  merging d (no-changeset !)
+  0 files updated, 1 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
   (branch merge, don't forget to commit)
-  $ hg ci -m 'mGFm-0 simple merge - the other way'
+  $ hg ci -m "mGFm-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
-  @    29 mGFm-0 simple merge - the other way
+  @    mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
   |\
-  +---o  28 mFGm-0 simple merge - one way
+  +---o  mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  22 f-2: rename i -> d
+  o |  f-2: rename i -> d
   | |
-  o |  21 f-1: rename h -> i
+  o |  f-1: rename h -> i
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 
@@ -491,6 +693,8 @@
 In this case, the file keeps on living after the merge, so we should not drop
 its copy tracing chain.
 
+  $ case_desc="merge updated/deleted - revive the file (updated content)"
+
   $ hg up 'desc("c-1")'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg merge 'desc("g-1")'
@@ -502,7 +706,7 @@
   [1]
   $ hg resolve -t :other d
   (no more unresolved files)
-  $ hg ci -m "mCGm-0"
+  $ hg ci -m "mCGm-0 $case_desc - one way"
   created new head
 
   $ hg up 'desc("g-1")'
@@ -516,23 +720,23 @@
   [1]
   $ hg resolve -t :local d
   (no more unresolved files)
-  $ hg ci -m "mGCm-0"
+  $ hg ci -m "mGCm-0 $case_desc - the other way"
   created new head
 
   $ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
-  @    31 mGCm-0
+  @    mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
   |\
-  +---o  30 mCGm-0
+  +---o  mCGm-0 merge updated/deleted - revive the file (updated content) - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  6 c-1 delete d
+  o |  c-1 delete d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 
@@ -548,13 +752,15 @@
 In this case, the file keeps on living after the merge, so we should not drop
 its copy tracing chain.
 
+  $ case_desc="merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge)"
+
   $ hg up 'desc("c-1")'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg merge 'desc("b-1")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg revert --rev 'desc("b-1")' d
-  $ hg ci -m "mCB-revert-m-0"
+  $ hg ci -m "mCB-revert-m-0 $case_desc - one way"
   created new head
 
   $ hg up 'desc("b-1")'
@@ -563,23 +769,23 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg revert --rev 'desc("b-1")' d
-  $ hg ci -m "mBC-revert-m-0"
+  $ hg ci -m "mBC-revert-m-0 $case_desc - the other way"
   created new head
 
   $ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
-  @    33 mBC-revert-m-0
+  @    mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
   |\
-  +---o  32 mCB-revert-m-0
+  +---o  mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
   | |/
-  | o  6 c-1 delete d
+  | o  c-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 
@@ -593,43 +799,665 @@
 
 (the copy information from the branch that was not deleted should win).
 
+  $ case_desc="simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch)"
+
   $ hg up 'desc("i-0")'
-  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg mv b d
   $ hg ci -m "h-1: b -(move)-> d"
   created new head
 
   $ hg up 'desc("c-1")'
-  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  2 files updated, 0 files merged, 3 files removed, 0 files unresolved
   $ hg merge 'desc("h-1")'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m "mCH-delete-before-conflict-m-0"
+  $ hg ci -m "mCH-delete-before-conflict-m-0 $case_desc - one way"
 
   $ hg up 'desc("h-1")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg merge 'desc("c-1")'
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
-  $ hg ci -m "mHC-delete-before-conflict-m-0"
+  $ hg ci -m "mHC-delete-before-conflict-m-0 $case_desc - the other way"
   created new head
   $ hg log -G --rev '::(desc("mCH-delete-before-conflict-m")+desc("mHC-delete-before-conflict-m"))'
-  @    36 mHC-delete-before-conflict-m-0
+  @    mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
   |\
-  +---o  35 mCH-delete-before-conflict-m-0
+  +---o  mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
+  | |/
+  | o  h-1: b -(move)-> d
+  | |
+  o |  c-1 delete d
+  | |
+  o |  i-2: c -move-> d, s -move-> t
+  | |
+  o |  i-1: a -move-> c, p -move-> s
+  |/
+  o  i-0 initial commit: a b h p q r
+  
+
+Variant of previous with extra changes introduced by the merge
+--------------------------------------------------------------
+
+Multiple cases above explicitly test merges where the file content is the same on both sides. In this section we introduce variants of these cases where new changes to that content are introduced during the merge.
+
+
+Subcase: merge has the same initial content on both sides, but the merge introduces a change
+`````````````````````````````````````````````````````````````````````````````````````````````
+
+Same as `mAEm` and `mEAm` but with an extra change to the file before committing
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+
+  $ case_desc="merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent)"
+
+  $ hg up 'desc("a-2")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("e-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ echo "content change for mAE-change-m" > f
+  $ hg ci -m "mAE-change-m-0 $case_desc - one way"
+  created new head
+  $ hg up 'desc("e-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("a-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ echo "content change for mEA-change-m" > f
+  $ hg ci -m "mEA-change-m-0 $case_desc - the other way"
+  created new head
+  $ hg log -G --rev '::(desc("mAE-change-m")+desc("mEA-change-m"))'
+  @    mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+  |\
+  +---o  mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
   | |/
-  | o  34 h-1: b -(move)-> d
+  | o  e-2 g -move-> f
+  | |
+  | o  e-1 b -move-> g
+  | |
+  o |  a-2: e -move-> f
+  | |
+  o |  a-1: d -move-> e
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: merge overwrites common copy information, but with an extra change during the merge
+``````````````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with a change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+- the merge updates the content of d
+
+  $ case_desc="merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d)"
+
+  $ hg up 'desc("f-2")'
+  2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+#if no-changeset
+  $ hg debugindex d | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * d8252ab2e760 000000000000 000000000000
+       1       * b004912a8510 000000000000 000000000000
+       2       * 7b79e2fe0c89 000000000000 000000000000
+       3       * 17ec97e60577 d8252ab2e760 000000000000
+       4       * 06dabf50734c b004912a8510 17ec97e60577
+       5       * 19c0e3924691 17ec97e60577 b004912a8510
+       6       * 89c873a01d97 7b79e2fe0c89 17ec97e60577
+       7       * d55cb4e9ef57 000000000000 000000000000
+#else
+  $ hg debugindex d | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * ae258f702dfe 000000000000 000000000000
+       1       * b004912a8510 000000000000 000000000000
+       2       * 5cce88bf349f ae258f702dfe 000000000000
+       3       * cc269dd788c8 b004912a8510 5cce88bf349f
+       4       * 51c91a115080 5cce88bf349f b004912a8510
+#endif
+  $ hg up 'desc("b-1")'
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("f-2")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ echo "extra-change to (formelly h) during the merge" > d
+  $ hg ci -m "mBF-change-m-0 $case_desc - one way"
+  created new head
+  $ hg manifest --rev . --debug | grep "  d"
+  1c334238bd42ec85c6a0d83fd1b2a898a6a3215d 644   d (no-changeset !)
+  cea2d99c0fde64672ef61953786fdff34f16e230 644   d (changeset !)
+
+  $ hg up 'desc("f-2")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("b-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ echo "extra-change to (formelly h) during the merge" > d
+  $ hg ci -m "mFB-change-m-0 $case_desc - the other way"
+  created new head
+  $ hg manifest --rev . --debug | grep "  d"
+  1c334238bd42ec85c6a0d83fd1b2a898a6a3215d 644   d (no-changeset missing-correct-output !)
+  646ed7992dec41eb29635ab28268e7867d0e59a0 644   d (no-changeset known-bad-output !)
+  cea2d99c0fde64672ef61953786fdff34f16e230 644   d (changeset !)
+#if no-changeset
+  $ hg debugindex d | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * d8252ab2e760 000000000000 000000000000
+       1       * b004912a8510 000000000000 000000000000
+       2       * 7b79e2fe0c89 000000000000 000000000000
+       3       * 17ec97e60577 d8252ab2e760 000000000000
+       4       * 06dabf50734c b004912a8510 17ec97e60577
+       5       * 19c0e3924691 17ec97e60577 b004912a8510
+       6       * 89c873a01d97 7b79e2fe0c89 17ec97e60577
+       7       * d55cb4e9ef57 000000000000 000000000000
+       8       * 1c334238bd42 7b79e2fe0c89 000000000000
+       9       * 646ed7992dec 7b79e2fe0c89 d8252ab2e760 (known-bad-output !)
+#else
+  $ hg debugindex d | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * ae258f702dfe 000000000000 000000000000
+       1       * b004912a8510 000000000000 000000000000
+       2       * 5cce88bf349f ae258f702dfe 000000000000
+       3       * cc269dd788c8 b004912a8510 5cce88bf349f
+       4       * 51c91a115080 5cce88bf349f b004912a8510
+       5       * cea2d99c0fde ae258f702dfe 000000000000
+#endif
+  $ hg log -G --rev '::(desc("mBF-change-m")+desc("mFB-change-m"))'
+  @    mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  |\
+  +---o  mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  | |/
+  | o  f-2: rename i -> d
+  | |
+  | o  f-1: rename h -> i
   | |
-  o |  6 c-1 delete d
+  o |  b-1: b update
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+Decisions from previous merges are properly chained with later merges
+----------------------------------------------------------------------
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mAEm" and "mEAm" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
+
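+(A minimal Python sketch of the chaining idea exercised below; an
+illustration under assumed data structures, not Mercurial's implementation.
+Copy records are folded along the chain so that a destination arbitrated by
+an earlier merge keeps pointing at the same source:)
+
+    def fold_copies(parent_copies, step_copies):
+        # parent_copies: {dest: source} decided up to the parent revision
+        # step_copies: {dest: source} recorded by one more revision
+        folded = dict(parent_copies)
+        for dest, source in step_copies.items():
+            # chain through the parent's decision when the source was
+            # itself a copy destination earlier in the chain
+            folded[dest] = parent_copies.get(source, source)
+        return folded
+
+    # 'f' was arbitrated to descend from 'd' in mAEm; a later merge that
+    # does not touch 'f' must leave that decision alone:
+    assert fold_copies({'f': 'd'}, {}) == {'f': 'd'}
+    assert fold_copies({'c': 'a'}, {'d': 'c'}) == {'c': 'a', 'd': 'a'}
+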
+  $ case_desc="chained merges (conflict -> simple) - same content everywhere"
+
+(extra unrelated changes)
+
+  $ hg up 'desc("a-2")'
+  3 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo j > unrelated-j
+  $ hg add unrelated-j
+  $ hg ci -m 'j-1: unrelated changes (based on the "a" series of changes)'
+  created new head
+
+  $ hg up 'desc("e-2")'
+  2 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+  $ echo k > unrelated-k
+  $ hg add unrelated-k
+  $ hg ci -m 'k-1: unrelated changes (based on "e" changes)'
+  created new head
+
+(merge variant 1)
+
+  $ hg up 'desc("mAEm")'
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("k-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mAE,Km: $case_desc"
+
+(merge variant 2)
+
+  $ hg up 'desc("k-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+
+  $ hg merge 'desc("mAEm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mK,AEm: $case_desc"
+  created new head
+
+(merge variant 3)
+
+  $ hg up 'desc("mEAm")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("j-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mEA,Jm: $case_desc"
+
+(merge variant 4)
+
+  $ hg up 'desc("j-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("mEAm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mJ,EAm: $case_desc"
+  created new head
+
+
+  $ hg log -G --rev '::(desc("mAE,Km") + desc("mK,AEm") + desc("mEA,Jm") + desc("mJ,EAm"))'
+  @    mJ,EAm: chained merges (conflict -> simple) - same content everywhere
+  |\
+  +---o  mEA,Jm: chained merges (conflict -> simple) - same content everywhere
+  | |/
+  | | o    mK,AEm: chained merges (conflict -> simple) - same content everywhere
+  | | |\
+  | | +---o  mAE,Km: chained merges (conflict -> simple) - same content everywhere
+  | | | |/
+  | | | o  k-1: unrelated changes (based on "e" changes)
+  | | | |
+  | o | |  j-1: unrelated changes (based on the "a" series of changes)
+  | | | |
+  o-----+  mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+  |/ / /
+  | o /  mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+  |/|/
+  | o  e-2 g -move-> f
+  | |
+  | o  e-1 b -move-> g
+  | |
+  o |  a-2: e -move-> f
   | |
-  o |  2 i-2: c -move-> d
+  o |  a-1: d -move-> e
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: chaining conflicting rename resolution, with actual merging happening
+``````````````````````````````````````````````````````````````````````````````
+
+The "mPQm" and "mQPm" case create a rename tracking conflict on file 't'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 't' and the arbitration done within "mPQm" and "mQP"
+about that file should stay unchanged.
+
+  $ case_desc="chained merges (conflict -> simple) - different content"
+
+(extra unrelated changes)
+
+  $ hg up 'desc("p-2")'
+  3 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ echo s > unrelated-s
+  $ hg add unrelated-s
+  $ hg ci -m 's-1: unrelated changes (based on the "p" series of changes)'
+  created new head
+
+  $ hg up 'desc("q-2")'
+  2 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo t > unrelated-t
+  $ hg add unrelated-t
+  $ hg ci -m 't-1: unrelated changes (based on "q" changes)'
+  created new head
+
+(merge variant 1)
+
+  $ hg up 'desc("mPQm")'
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ hg merge 'desc("t-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mPQ,Tm: $case_desc"
+
+(merge variant 2)
+
+  $ hg up 'desc("t-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg merge 'desc("mPQm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mT,PQm: $case_desc"
+  created new head
+
+(merge variant 3)
+
+  $ hg up 'desc("mQPm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("s-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mQP,Sm: $case_desc"
+
+(merge variant 4)
+
+  $ hg up 'desc("s-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("mQPm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mS,QPm: $case_desc"
+  created new head
+  $ hg up null --quiet
+
+
+  $ hg log -G --rev '::(desc("mPQ,Tm") + desc("mT,PQm") + desc("mQP,Sm") + desc("mS,QPm"))'
+  o    mS,QPm: chained merges (conflict -> simple) - different content
+  |\
+  +---o  mQP,Sm: chained merges (conflict -> simple) - different content
+  | |/
+  | | o    mT,PQm: chained merges (conflict -> simple) - different content
+  | | |\
+  | | +---o  mPQ,Tm: chained merges (conflict -> simple) - different content
+  | | | |/
+  | | | o  t-1: unrelated changes (based on "q" changes)
+  | | | |
+  | o | |  s-1: unrelated changes (based on the "p" series of changes)
+  | | | |
+  o-----+  mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+  |/ / /
+  | o /  mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+  |/|/
+  | o  q-2 w -move-> v
   | |
-  o |  1 i-1: a -move-> c
+  | o  q-1 r -move-> w
+  | |
+  o |  p-2: u -move-> v
+  | |
+  o |  p-1: t -move-> u
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: chaining salvage information during a merge
+````````````````````````````````````````````````````
+
+We add more changes on the branch where the file was deleted. Merging again
+should preserve the fact that the file was salvaged.
+
+  $ case_desc="chained merges (salvaged -> simple) - same content (when the file exists)"
+
+(creating the change)
+
+  $ hg up 'desc("c-1")'
+  5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo l > unrelated-l
+  $ hg add unrelated-l
+  $ hg ci -m 'l-1: unrelated changes (based on "c" changes)'
+  created new head
+
+(Merge variant 1)
+
+  $ hg up 'desc("mBC-revert-m")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("l-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mBC+revert,Lm: $case_desc"
+
+(Merge variant 2)
+
+  $ hg up 'desc("mCB-revert-m")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("l-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mCB+revert,Lm: $case_desc"
+
+(Merge variant 3)
+
+  $ hg up 'desc("l-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+  $ hg merge 'desc("mBC-revert-m")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mL,BC+revertm: $case_desc"
+  created new head
+
+(Merge variant 4)
+
+  $ hg up 'desc("l-1")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+  $ hg merge 'desc("mCB-revert-m")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mL,CB+revertm: $case_desc"
+  created new head
+
+  $ hg log -G --rev '::(desc("mBC+revert,Lm") + desc("mCB+revert,Lm") + desc("mL,BC+revertm") + desc("mL,CB+revertm"))'
+  @    mL,CB+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+  |\
+  | | o  mL,BC+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+  | |/|
+  +-+---o  mCB+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+  | | |
+  | +---o  mBC+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+  | | |/
+  | o |  l-1: unrelated changes (based on "c" changes)
+  | | |
+  | | o  mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+  | |/|
+  o---+  mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+  |/ /
+  o |  c-1 delete d
+  | |
+  | o  b-1: b update
   |/
-  o  0 i-0 initial commit: a b h
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
   
 
 
+Subcase: chaining "merged" information during a merge
+``````````````````````````````````````````````````````
+
+When a non-rename change is merged with a copy overwrite, the merge picks the copy source from (p1) as the reference. We should preserve this information in subsequent merges.
+
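+(A sketch of the arbitration rule just described, assuming each parent
+contributes at most one copy record per destination; an illustration, not
+the actual merge code:)
+
+    def pick_copy_source(p1_source, p2_source):
+        # When both parents carry copy information for the same
+        # destination, the record from the first parent (p1) wins.
+        return p1_source if p1_source is not None else p2_source
+
+    # F overwrote d with a copy from h (via i); merged with a branch that
+    # only touched b, the p1-side record is the one that survives:
+    assert pick_copy_source('h', None) == 'h'
+    assert pick_copy_source('h', 'c') == 'h'
+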
+  $ case_desc="chained merges (copy-overwrite -> simple) - same content"
+
+(extra unrelated changes)
+
+  $ hg up 'desc("f-2")'
+  2 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+  $ echo n > unrelated-n
+  $ hg add unrelated-n
+  $ hg ci -m 'n-1: unrelated changes (based on the "f" series of changes)'
+  created new head
+
+  $ hg up 'desc("g-1")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo o > unrelated-o
+  $ hg add unrelated-o
+  $ hg ci -m 'o-1: unrelated changes (based on "g" changes)'
+  created new head
+
+(merge variant 1)
+
+  $ hg up 'desc("mFGm")'
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("o-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mFG,Om: $case_desc"
+
+(merge variant 2)
+
+  $ hg up 'desc("o-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-changeset !)
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved (changeset !)
+  $ hg merge 'desc("FGm")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved (no-changeset !)
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved (changeset !)
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mO,FGm: $case_desc"
+  created new head
+
+(merge variant 3)
+
+  $ hg up 'desc("mGFm")'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("n-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mGF,Nm: $case_desc"
+
+(merge variant 4)
+
+  $ hg up 'desc("n-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("mGFm")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mN,GFm: $case_desc"
+  created new head
+
+  $ hg log -G --rev '::(desc("mFG,Om") + desc("mO,FGm") + desc("mGF,Nm") + desc("mN,GFm"))'
+  @    mN,GFm: chained merges (copy-overwrite -> simple) - same content
+  |\
+  +---o  mGF,Nm: chained merges (copy-overwrite -> simple) - same content
+  | |/
+  | | o    mO,FGm: chained merges (copy-overwrite -> simple) - same content
+  | | |\
+  | | +---o  mFG,Om: chained merges (copy-overwrite -> simple) - same content
+  | | | |/
+  | | | o  o-1: unrelated changes (based on "g" changes)
+  | | | |
+  | o | |  n-1: unrelated changes (based on the "f" series of changes)
+  | | | |
+  o-----+  mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
+  |/ / /
+  | o /  mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
+  |/|/
+  | o  g-1: update d
+  | |
+  o |  f-2: rename i -> d
+  | |
+  o |  f-1: rename h -> i
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+Subcase: chaining conflicting rename resolution, with an extra change during the merge
+``````````````````````````````````````````````````````````````````````````````````````
+
+The "mEA-change-m-0" and "mAE-change-m-0" case create a rename tracking conflict on file 'f'. We
+add more change on the respective branch and merge again. These second merge
+does not involve the file 'f' and the arbitration done within "mAEm" and "mEA"
+about that file should stay unchanged.
+
+  $ case_desc="chained merges (conflict+change -> simple) - same content on both branch in the initial merge"
+
+
+(merge variant 1)
+
+  $ hg up 'desc("mAE-change-m")'
+  2 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ hg merge 'desc("k-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mAE-change,Km: $case_desc"
+
+(merge variant 2)
+
+  $ hg up 'desc("k-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg merge 'desc("mAE-change-m")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mK,AE-change-m: $case_desc"
+  created new head
+
+(merge variant 3)
+
+  $ hg up 'desc("mEA-change-m")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("j-1")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mEA-change,Jm: $case_desc"
+
+(merge variant 4)
+
+  $ hg up 'desc("j-1")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge 'desc("mEA-change-m")'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -m "mJ,EA-change-m: $case_desc"
+  created new head
+
+
+  $ hg log -G --rev '::(desc("mAE-change,Km") + desc("mK,AE-change-m") + desc("mEA-change,Jm") + desc("mJ,EA-change-m"))'
+  @    mJ,EA-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  |\
+  +---o  mEA-change,Jm: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  | |/
+  | | o    mK,AE-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  | | |\
+  | | +---o  mAE-change,Km: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  | | | |/
+  | | | o  k-1: unrelated changes (based on "e" changes)
+  | | | |
+  | o | |  j-1: unrelated changes (based on the "a" series of changes)
+  | | | |
+  o-----+  mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+  |/ / /
+  | o /  mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+  |/|/
+  | o  e-2 g -move-> f
+  | |
+  | o  e-1 b -move-> g
+  | |
+  o |  a-2: e -move-> f
+  | |
+  o |  a-1: d -move-> e
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
 Summary of all created cases
 ----------------------------
 
@@ -650,31 +1478,72 @@
   f-2: rename i -> d
   g-1: update d
   h-1: b -(move)-> d
-  i-0 initial commit: a b h
-  i-1: a -move-> c
-  i-2: c -move-> d
-  mABm-0 simple merge - the other way
-  mAEm-0 simple merge - one way
-  mBAm-0 simple merge - one way
-  mBC-revert-m-0
-  mBCm-0 simple merge - one way
+  i-0 initial commit: a b h p q r
+  i-1: a -move-> c, p -move-> s
+  i-2: c -move-> d, s -move-> t
+  j-1: unrelated changes (based on the "a" series of changes)
+  k-1: unrelated changes (based on "e" changes)
+  l-1: unrelated changes (based on "c" changes)
+  mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
+  mAE,Km: chained merges (conflict -> simple) - same content everywhere
+  mAE-change,Km: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+  mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
+  mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
+  mBC+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+  mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
+  mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
   mBCm-1 re-add d
-  mBDm-0 simple merge - one way
-  mBFm-0 simple merge - one way
-  mCB-revert-m-0
-  mCBm-0 simple merge - the other way
+  mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
+  mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+  mCB+revert,Lm: chained merges (salvaged -> simple) - same content (when the file exists)
+  mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
+  mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
   mCBm-1 re-add d
-  mCGm-0
-  mCH-delete-before-conflict-m-0
-  mDBm-0 simple merge - the other way
-  mDGm-0 simple merge - one way
-  mEAm-0 simple merge - the other way
-  mFBm-0 simple merge - the other way
-  mFGm-0 simple merge - one way
-  mGCm-0
-  mGDm-0 simple merge - the other way
-  mGFm-0 simple merge - the other way
-  mHC-delete-before-conflict-m-0
+  mCGm-0 merge updated/deleted - revive the file (updated content) - one way
+  mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
+  mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
+  mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
+  mEA,Jm: chained merges (conflict -> simple) - same content everywhere
+  mEA-change,Jm: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+  mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
+  mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  mFG,Om: chained merges (copy-overwrite -> simple) - same content
+  mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
+  mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
+  mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
+  mGF,Nm: chained merges (copy-overwrite -> simple) - same content
+  mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
+  mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
+  mJ,EA-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  mJ,EAm: chained merges (conflict -> simple) - same content everywhere
+  mK,AE-change-m: chained merges (conflict+change -> simple) - same content on both branch in the initial merge
+  mK,AEm: chained merges (conflict -> simple) - same content everywhere
+  mL,BC+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+  mL,CB+revertm: chained merges (salvaged -> simple) - same content (when the file exists)
+  mN,GFm: chained merges (copy-overwrite -> simple) - same content
+  mO,FGm: chained merges (copy-overwrite -> simple) - same content
+  mPQ,Tm: chained merges (conflict -> simple) - different content
+  mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+  mQP,Sm: chained merges (conflict -> simple) - different content
+  mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+  mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+  mS,QPm: chained merges (conflict -> simple) - different content
+  mT,PQm: chained merges (conflict -> simple) - different content
+  n-1: unrelated changes (based on the "f" series of changes)
+  o-1: unrelated changes (based on "g" changes)
+  p-1: t -move-> u
+  p-2: u -move-> v
+  q-1 r -move-> w
+  q-2 w -move-> v
+  r-1: rename r -> x
+  r-2: rename t -> x
+  s-1: unrelated changes (based on the "p" series of changes)
+  t-1: unrelated changes (based on "q" changes)
 
 
 Test that sidedata computations during upgrades are correct
@@ -698,9 +1567,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no    yes      no
+  revlog-v2:           no    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        * (glob)
   compression-level:  default default default
@@ -709,7 +1578,47 @@
   
   requirements
      preserved: * (glob)
-     added: exp-copies-sidedata-changeset, exp-sidedata-flag
+     removed: revlogv1
+     added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+  
+  processed revlogs:
+    - all-filelogs
+    - changelog
+    - manifest
+  
+#endif
+
+#if upgraded-parallel
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-side-data = yes
+  > exp-use-copies-side-data-changeset = yes
+  > [experimental]
+  > worker.repository-upgrade=yes
+  > [worker]
+  > enabled=yes
+  > numcpus=8
+  > EOF
+  $ hg debugformat -v
+  format-variant     repo config default
+  fncache:            yes    yes     yes
+  dotencode:          yes    yes     yes
+  generaldelta:       yes    yes     yes
+  share-safe:          no     no      no
+  sparserevlog:       yes    yes     yes
+  persistent-nodemap:  no     no      no
+  copies-sdc:          no    yes      no
+  revlog-v2:           no    yes      no
+  plain-cl-delta:     yes    yes     yes
+  compression:        * (glob)
+  compression-level:  default default default
+  $ hg debugupgraderepo --run --quiet
+  upgrade will perform the following actions:
+  
+  requirements
+     preserved: * (glob)
+     removed: revlogv1
+     added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
   
   processed revlogs:
     - all-filelogs
@@ -721,194 +1630,409 @@
 
 #if no-compatibility no-filelog no-changeset
 
+  $ hg debugchangedfiles --compute 0
+  added      : a, ;
+  added      : b, ;
+  added      : h, ;
+  added      : p, ;
+  added      : q, ;
+  added      : r, ;
+
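+The entry-0014 payloads dumped below follow a compact layout that can be read
+off the sizes shown above (34 bytes for 3 files, 64 for 6, and so on): a
+4-byte big-endian file count, one 9-byte record per file (a flag byte, the
+4-byte end offset of the name in the trailing name blob, and the 4-byte index
+of the copy source), followed by the concatenated filenames. Below is a hedged
+Python decoder sketch; the flag values are inferred from these dumps rather
+than taken from the source:
+
+    import struct
+
+    # inferred action flags; bit 0x02 marks "copy source recorded"
+    ACTIONS = {0x04: 'added', 0x08: 'merged', 0x0c: 'removed',
+               0x10: 'salvaged', 0x14: 'touched'}
+
+    def decode_entry_0014(raw):
+        (count,) = struct.unpack_from('>I', raw, 0)
+        records = [struct.unpack_from('>BII', raw, 4 + 9 * i)
+                   for i in range(count)]
+        blob = raw[4 + 9 * count:]
+        names, start = [], 0
+        for _, end, _ in records:
+            names.append(blob[start:end].decode('ascii'))
+            start = end
+        return [(ACTIONS.get(flags & ~0x03, hex(flags)), name,
+                 names[src] if flags & 0x02 else None)
+                for (flags, _, src), name in zip(records, names)]
+
+    # e.g. the "i-1" payload decodes to: removed a, added c (from a),
+    # removed p, added s (from p)
+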
   $ for rev in `hg log --rev 'all()' -T '{rev}\n'`; do
-  >     echo "##### revision $rev #####"
+  >     case_id=`hg log -r $rev -T '{word(0, desc, ":")}\n'`
+  >     echo "##### revision \"$case_id\" #####"
   >     hg debugsidedata -c -v -- $rev
   >     hg debugchangedfiles $rev
   > done
-  ##### revision 0 #####
+  ##### revision "i-0 initial commit" #####
   1 sidedata entries
-   entry-0014 size 34
-    '\x00\x00\x00\x03\x04\x00\x00\x00\x01\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00abh'
+   entry-0014 size 64
+    '\x00\x00\x00\x06\x04\x00\x00\x00\x01\x00\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x00abhpqr'
   added      : a, ;
   added      : b, ;
   added      : h, ;
-  ##### revision 1 #####
+  added      : p, ;
+  added      : q, ;
+  added      : r, ;
+  ##### revision "i-1" #####
   1 sidedata entries
-   entry-0014 size 24
-    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ac'
+   entry-0014 size 44
+    '\x00\x00\x00\x04\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x02acps'
   removed    : a, ;
   added    p1: c, a;
-  ##### revision 2 #####
+  removed    : p, ;
+  added    p1: s, p;
+  ##### revision "i-2" #####
   1 sidedata entries
-   entry-0014 size 24
-    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00cd'
+   entry-0014 size 44
+    '\x00\x00\x00\x04\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x02cdst'
   removed    : c, ;
   added    p1: d, c;
-  ##### revision 3 #####
+  removed    : s, ;
+  added    p1: t, s;
+  ##### revision "a-1" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00de'
   removed    : d, ;
   added    p1: e, d;
-  ##### revision 4 #####
+  ##### revision "a-2" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00ef'
   removed    : e, ;
   added    p1: f, e;
-  ##### revision 5 #####
+  ##### revision "b-1" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00b'
   touched    : b, ;
-  ##### revision 6 #####
+  ##### revision "c-1 delete d" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
   removed    : d, ;
-  ##### revision 7 #####
+  ##### revision "d-1 delete d" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x0c\x00\x00\x00\x01\x00\x00\x00\x00d'
   removed    : d, ;
-  ##### revision 8 #####
+  ##### revision "d-2 re-add d" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
   added      : d, ;
-  ##### revision 9 #####
+  ##### revision "e-1 b -move-> g" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00bg'
   removed    : b, ;
   added    p1: g, b;
-  ##### revision 10 #####
+  ##### revision "e-2 g -move-> f" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x06\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00fg'
   added    p1: f, g;
   removed    : g, ;
-  ##### revision 11 #####
+  ##### revision "p-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00tu'
+  removed    : t, ;
+  added    p1: u, t;
+  ##### revision "p-2" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00uv'
+  removed    : u, ;
+  added    p1: v, u;
+  ##### revision "q-1 r -move-> w" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00rw'
+  removed    : r, ;
+  added    p1: w, r;
+  ##### revision "q-2 w -move-> v" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x06\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00vw'
+  added    p1: v, w;
+  removed    : w, ;
+  ##### revision "mBAm-0 simple merge - A side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 12 #####
+  ##### revision "mABm-0 simple merge - A side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 13 #####
+  ##### revision "mBCm-0 simple merge - C side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 14 #####
+  ##### revision "mBCm-1 re-add d" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
   added      : d, ;
-  ##### revision 15 #####
+  ##### revision "mCBm-0 simple merge - C side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 16 #####
+  ##### revision "mCBm-1 re-add d" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x04\x00\x00\x00\x01\x00\x00\x00\x00d'
   added      : d, ;
-  ##### revision 17 #####
+  ##### revision "mBDm-0 simple merge - B side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 18 #####
+  ##### revision "mDBm-0 simple merge - B side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 19 #####
+  ##### revision "mAEm-0 merge with copies info on both side - A side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
   merged     : f, ;
-  ##### revision 20 #####
+  ##### revision "mEAm-0 merge with copies info on both side - A side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
   merged     : f, ;
-  ##### revision 21 #####
+  ##### revision "mPQm-0 merge with copies info on both side - P side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00v'
+  merged     : v, ;
+  ##### revision "mQPm-0 merge with copies info on both side - P side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00v'
+  merged     : v, ;
+  ##### revision "f-1" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00hi'
   removed    : h, ;
   added    p1: i, h;
-  ##### revision 22 #####
+  ##### revision "f-2" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x16\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00di'
   touched  p1: d, i;
   removed    : i, ;
-  ##### revision 23 #####
+  ##### revision "mBFm-0 simple merge - B side" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mFBm-0 simple merge - B side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 24 #####
+  ##### revision "r-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00rx'
+  removed    : r, ;
+  added    p1: x, r;
+  ##### revision "r-2" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x02\x16\x00\x00\x00\x01\x00\x00\x00\x01\x0c\x00\x00\x00\x02\x00\x00\x00\x00tx'
+  touched  p1: t, x;
+  removed    : x, ;
+  ##### revision "mBRm-0 simple merge - B side" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 25 #####
+  ##### revision "mRBm-0 simple merge - B side" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "g-1" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d'
   touched    : d, ;
-  ##### revision 26 #####
-  1 sidedata entries
-   entry-0014 size 14
-    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
-  merged     : d, ;
-  ##### revision 27 #####
+  ##### revision "mDGm-0 actual content merge, copies on one side - D side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
   merged     : d, ;
-  ##### revision 28 #####
+  ##### revision "mGDm-0 actual content merge, copies on one side - D side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
   merged     : d, ;
-  ##### revision 29 #####
+  ##### revision "mFGm-0 merge - G side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
   merged     : d, ;
-  ##### revision 30 #####
+  ##### revision "mGFm-0 merge - G side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d'
+  merged     : d, ;
+  ##### revision "mCGm-0 merge updated/deleted - revive the file (updated content) - one way" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
   salvaged   : d, ;
-  ##### revision 31 #####
+  ##### revision "mGCm-0 merge updated/deleted - revive the file (updated content) - the other way" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
   salvaged   : d, ;
-  ##### revision 32 #####
+  ##### revision "mCB-revert-m-0 merge explicitely revive deleted file - B side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
   salvaged   : d, ;
-  ##### revision 33 #####
+  ##### revision "mBC-revert-m-0 merge explicitely revive deleted file - B side" #####
   1 sidedata entries
    entry-0014 size 14
     '\x00\x00\x00\x01\x10\x00\x00\x00\x01\x00\x00\x00\x00d'
   salvaged   : d, ;
-  ##### revision 34 #####
+  ##### revision "h-1" #####
   1 sidedata entries
    entry-0014 size 24
     '\x00\x00\x00\x02\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x06\x00\x00\x00\x02\x00\x00\x00\x00bd'
   removed    : b, ;
   added    p1: d, b;
-  ##### revision 35 #####
+  ##### revision "mCH-delete-before-conflict-m-0 simple merge - C side" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mHC-delete-before-conflict-m-0 simple merge - C side" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mAE-change-m-0 merge with file update and copies info on both side - A side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+  merged     : f, ;
+  ##### revision "mEA-change-m-0 merge with file update and copies info on both side - A side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00f'
+  merged     : f, ;
+  ##### revision "mBF-change-m-0 merge with extra change - B side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d' (no-upgraded no-upgraded-parallel !)
+  touched    : d, ; (no-upgraded no-upgraded-parallel !)
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d' (upgraded missing-correct-output !)
+  touched    : d, ; (upgraded missing-correct-output !)
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d' (upgraded known-bad-output !)
+  merged     : d, ; (upgraded known-bad-output !)
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d' (upgraded-parallel missing-correct-output !)
+  touched    : d, ; (upgraded-parallel missing-correct-output !)
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d' (upgraded-parallel known-bad-output !)
+  merged     : d, ; (upgraded-parallel known-bad-output !)
+  ##### revision "mFB-change-m-0 merge with extra change - B side" #####
+  1 sidedata entries
+   entry-0014 size 14
+    '\x00\x00\x00\x01\x08\x00\x00\x00\x01\x00\x00\x00\x00d' (known-bad-output !)
+  merged     : d, ; (known-bad-output !)
+    '\x00\x00\x00\x01\x14\x00\x00\x00\x01\x00\x00\x00\x00d' (missing-correct-output !)
+  touched    : d, ; (missing-correct-output !)
+  ##### revision "j-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-j'
+  added      : unrelated-j, ;
+  ##### revision "k-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-k'
+  added      : unrelated-k, ;
+  ##### revision "mAE,Km" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mK,AEm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mEA,Jm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mJ,EAm" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
-  ##### revision 36 #####
+  ##### revision "s-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-s'
+  added      : unrelated-s, ;
+  ##### revision "t-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-t'
+  added      : unrelated-t, ;
+  ##### revision "mPQ,Tm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mT,PQm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mQP,Sm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mS,QPm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "l-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-l'
+  added      : unrelated-l, ;
+  ##### revision "mBC+revert,Lm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mCB+revert,Lm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mL,BC+revertm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mL,CB+revertm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "n-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-n'
+  added      : unrelated-n, ;
+  ##### revision "o-1" #####
+  1 sidedata entries
+   entry-0014 size 24
+    '\x00\x00\x00\x01\x04\x00\x00\x00\x0b\x00\x00\x00\x00unrelated-o'
+  added      : unrelated-o, ;
+  ##### revision "mFG,Om" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mO,FGm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mGF,Nm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mN,GFm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mAE-change,Km" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mK,AE-change-m" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mEA-change,Jm" #####
+  1 sidedata entries
+   entry-0014 size 4
+    '\x00\x00\x00\x00'
+  ##### revision "mJ,EA-change-m" #####
   1 sidedata entries
    entry-0014 size 4
     '\x00\x00\x00\x00'
@@ -927,7 +2051,10 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("a-2")'
   A f
     a
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("a-2")' f
   A f
     a (no-changeset no-compatibility !)
@@ -939,21 +2066,21 @@
 - unrelated change on the other side
 
   $ hg log -G --rev '::(desc("mABm")+desc("mBAm"))'
-  o    12 mABm-0 simple merge - the other way
+  o    mABm-0 simple merge - A side: multiple renames, B side: unrelated update - the other way
   |\
-  +---o  11 mBAm-0 simple merge - one way
+  +---o  mBAm-0 simple merge - A side: multiple renames, B side: unrelated update - one way
   | |/
-  | o  5 b-1: b update
+  | o  b-1: b update
   | |
-  o |  4 a-2: e -move-> f
+  o |  a-2: e -move-> f
   | |
-  o |  3 a-1: d -move-> e
+  o |  a-1: d -move-> e
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
   $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mABm")'
@@ -982,12 +2109,18 @@
   M b
   A f
     a
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBAm")'
   M b
   A f
     a
+  A t
+    p
   R a
+  R p
 
 merging with the side having a delete
 -------------------------------------
@@ -998,23 +2131,23 @@
 and recreate an unrelated file after the merge
 
   $ hg log -G --rev '::(desc("mCBm")+desc("mBCm"))'
-  o  16 mCBm-1 re-add d
+  o  mCBm-1 re-add d
   |
-  o    15 mCBm-0 simple merge - the other way
+  o    mCBm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - the other way
   |\
-  | | o  14 mBCm-1 re-add d
+  | | o  mBCm-1 re-add d
   | | |
-  +---o  13 mBCm-0 simple merge - one way
+  +---o  mBCm-0 simple merge - C side: delete a file with copies history , B side: unrelated update - one way
   | |/
-  | o  6 c-1 delete d
+  | o  c-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 - comparing from the merge
 
@@ -1034,10 +2167,16 @@
   R d
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-0")'
   M b
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-0")'
   M b
+  A t
+    p
   R a
+  R p
 
 - comparing with the merge children re-adding the file
 
@@ -1060,11 +2199,17 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBCm-1")'
   M b
   A d
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCBm-1")'
   M b
   A d
+  A t
+    p
   R a
+  R p
 
 Comparing with a merge re-adding the file afterward
 ---------------------------------------------------
@@ -1074,21 +2219,21 @@
 - one deleting and recreating the change
 
   $ hg log -G --rev '::(desc("mDBm")+desc("mBDm"))'
-  o    18 mDBm-0 simple merge - the other way
+  o    mDBm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - the other way
   |\
-  +---o  17 mBDm-0 simple merge - one way
+  +---o  mBDm-0 simple merge - B side: unrelated update, D side: delete and recreate a file (with different content) - one way
   | |/
-  | o  8 d-2 re-add d
+  | o  d-2 re-add d
   | |
-  | o  7 d-1 delete d
+  | o  d-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
   $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBDm-0")'
   M d
@@ -1115,93 +2260,102 @@
   $ hg manifest --debug --rev 'desc("d-2")' | grep '644   d'
   b004912a8510032a0350a74daa2803dadfb00e12 644   d
   $ hg manifest --debug --rev 'desc("b-1")' | grep '644   d'
-  169be882533bc917905d46c0c951aa9a1e288dcf 644   d (no-changeset !)
-  b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644   d (changeset !)
-  $ hg debugindex d | head -n 4
+  d8252ab2e760b0d4e5288fd44cbd15a0fa567e16 644   d (no-changeset !)
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   d (changeset !)
+  $ hg debugindex d | head -n 4 | ../no-linkrev
      rev linkrev nodeid       p1           p2
-       0       2 169be882533b 000000000000 000000000000 (no-changeset !)
-       0       2 b789fdd96dc2 000000000000 000000000000 (changeset !)
-       1       8 b004912a8510 000000000000 000000000000
-       2      22 4a067cf8965d 000000000000 000000000000 (no-changeset !)
-       2      22 fe6f8b4f507f 000000000000 000000000000 (changeset !)
+       0       * d8252ab2e760 000000000000 000000000000 (no-changeset !)
+       0       * ae258f702dfe 000000000000 000000000000 (changeset !)
+       1       * b004912a8510 000000000000 000000000000
+       2       * 7b79e2fe0c89 000000000000 000000000000 (no-changeset !)
+       2       * 5cce88bf349f ae258f702dfe 000000000000 (changeset !)
 
 Log output should not include a merge commit as it did not happen
 
   $ hg log -Gfr 'desc("mBDm-0")' d
-  o  8 d-2 re-add d
+  o  d-2 re-add d
   |
   ~
 
   $ hg log -Gfr 'desc("mDBm-0")' d
-  o  8 d-2 re-add d
+  o  d-2 re-add d
   |
   ~
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBDm-0")'
   M b
   A d
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDBm-0")'
   M b
   A d
+  A t
+    p
   R a
+  R p
 
 
 Comparing with a merge with colliding rename
 --------------------------------------------
 
+Subcase: new copy information on both sides
+````````````````````````````````````````````
+
 - the "e-" branch renaming b to f (through 'g')
 - the "a-" branch renaming d to f (through e)
 
   $ hg log -G --rev '::(desc("mAEm")+desc("mEAm"))'
-  o    20 mEAm-0 simple merge - the other way
+  o    mEAm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - the other way
   |\
-  +---o  19 mAEm-0 simple merge - one way
+  +---o  mAEm-0 merge with copies info on both side - A side: rename d to f, E side: b to f, (same content for f) - one way
   | |/
-  | o  10 e-2 g -move-> f
+  | o  e-2 g -move-> f
   | |
-  | o  9 e-1 b -move-> g
+  | o  e-1 b -move-> g
   | |
-  o |  4 a-2: e -move-> f
+  o |  a-2: e -move-> f
   | |
-  o |  3 a-1: d -move-> e
+  o |  a-1: d -move-> e
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #if no-changeset
   $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644   f'
-  c39c6083dad048d5138618a46f123e2f397f4f18 644   f
+  2ff93c643948464ee1f871867910ae43a45b0bea 644   f
   $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644   f'
-  a9a8bc3860c9d8fa5f2f7e6ea8d40498322737fd 644   f
+  2ff93c643948464ee1f871867910ae43a45b0bea 644   f
   $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
-  263ea25e220aaeb7b9bac551c702037849aa75e8 644   f
+  b76eb76580df486c3d51d63c5c210d4dd43a8ac7 644   f
   $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
-  71b9b7e73d973572ade6dd765477fcee6890e8b1 644   f
-  $ hg debugindex f
+  e8825b386367b29fec957283a80bb47b47483fe1 644   f
+  $ hg debugindex f | ../no-linkrev
      rev linkrev nodeid       p1           p2
-       0       4 263ea25e220a 000000000000 000000000000
-       1      10 71b9b7e73d97 000000000000 000000000000
-       2      19 c39c6083dad0 263ea25e220a 71b9b7e73d97
-       3      20 a9a8bc3860c9 71b9b7e73d97 263ea25e220a
+       0       * b76eb76580df 000000000000 000000000000
+       1       * e8825b386367 000000000000 000000000000
+       2       * 2ff93c643948 b76eb76580df e8825b386367
+       3       * 2f649fba7eb2 b76eb76580df e8825b386367
+       4       * 774e7c1637d5 e8825b386367 b76eb76580df
 #else
   $ hg manifest --debug --rev 'desc("mAEm-0")' | grep '644   f'
-  498e8799f49f9da1ca06bb2d6d4accf165c5b572 644   f
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
   $ hg manifest --debug --rev 'desc("mEAm-0")' | grep '644   f'
-  c5b506a7118667a38a9c9348a1f63b679e382f57 644   f
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
   $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
-  b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644   f
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
   $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
-  1e88685f5ddec574a34c70af492f95b6debc8741 644   f
-  $ hg debugindex f
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
+  $ hg debugindex f | ../no-linkrev
      rev linkrev nodeid       p1           p2
-       0       4 b789fdd96dc2 000000000000 000000000000
-       1      10 1e88685f5dde 000000000000 000000000000
-       2      19 498e8799f49f b789fdd96dc2 1e88685f5dde
-       3      20 c5b506a71186 1e88685f5dde b789fdd96dc2
+       0       * ae258f702dfe 000000000000 000000000000
+       1       * d3613c1ec831 ae258f702dfe 000000000000
+       2       * 05e03c868bbc ae258f702dfe 000000000000
 #endif
 
 # Here the filelog based implementation is not looking at the rename
@@ -1209,20 +2363,20 @@
 # based on works fine. We have different output.
 
   $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
-  M f
-    b (no-filelog !)
+  M f (no-changeset !)
+    b (no-filelog no-changeset !)
   R b
   $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
-  M f
-    b (no-filelog !)
+  M f (no-changeset !)
+    b (no-filelog no-changeset !)
   R b
   $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
-  M f
-    d (no-filelog !)
+  M f (no-changeset !)
+    d (no-filelog no-changeset !)
   R d
   $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
-  M f
-    d (no-filelog !)
+  M f (no-changeset !)
+    d (no-filelog no-changeset !)
   R d
   $ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
   A f
@@ -1258,15 +2412,24 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
   A f
     a
+  A t
+    p
   R a
   R b
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
   A f
     a (filelog !)
     b (no-filelog !)
+  A t
+    p
   R a
   R b
-
+  R p
+
+
+Subcase: existing copy information overwritten on one branch
+````````````````````````````````````````````````````````````
 
 Note:
 | In this case, one of the merges wrongly records a merge while there is none.
@@ -1278,90 +2441,196 @@
 - one overwriting a file (d) with a rename (from h to i to d)
 
   $ hg log -G --rev '::(desc("mBFm")+desc("mFBm"))'
-  o    24 mFBm-0 simple merge - the other way
+  o    mFBm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
   |\
-  +---o  23 mBFm-0 simple merge - one way
+  +---o  mBFm-0 simple merge - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
   | |/
-  | o  22 f-2: rename i -> d
+  | o  f-2: rename i -> d
   | |
-  | o  21 f-1: rename h -> i
+  | o  f-1: rename h -> i
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBFm-0")'
   M b
   A d
     h
+  A t
+    p
   R a
   R h
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFBm-0")'
   M b
   A d
     h
+  A t
+    p
   R a
   R h
+  R p
   $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBFm-0")'
-  M d
-    h (no-filelog !)
+  M d (no-changeset !)
+    h (no-filelog no-changeset !)
   R h
   $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBFm-0")'
   M b
   $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBFm-0")'
   M b
-  M d
-    i (no-filelog !)
+  M d (no-changeset !)
+    i (no-filelog no-changeset !)
   R i
   $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFBm-0")'
-  M d
-    h (no-filelog !)
+  M d (no-changeset !)
+    h (no-filelog no-changeset !)
   R h
   $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFBm-0")'
   M b
   $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFBm-0")'
   M b
-  M d
-    i (no-filelog !)
+  M d (no-changeset !)
+    i (no-filelog no-changeset !)
   R i
 
 #if no-changeset
   $ hg log -Gfr 'desc("mBFm-0")' d
-  o  22 f-2: rename i -> d
+  o  f-2: rename i -> d
   |
-  o  21 f-1: rename h -> i
+  o  f-1: rename h -> i
   :
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mBFm-0")' d
-  o  22 f-2: rename i -> d
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
 
 #if no-changeset
   $ hg log -Gfr 'desc("mFBm-0")' d
-  o  22 f-2: rename i -> d
+  o  f-2: rename i -> d
   |
-  o  21 f-1: rename h -> i
+  o  f-1: rename h -> i
   :
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mFBm-0")' d
-  o  22 f-2: rename i -> d
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
 
 
+Subcase: existing copy information overwritten on one branch, with different content
+`````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (t) with a rename (from r to x to t); the new 't' content is not the same as on the other branch
+
+  $ hg log -G --rev '::(desc("mBRm")+desc("mRBm"))'
+  o    mRBm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - the other way
+  |\
+  +---o  mBRm-0 simple merge - B side: unrelated change, R side: overwrite d with a copy (from r->x->t) different content - one way
+  | |/
+  | o  r-2: rename t -> x
+  | |
+  | o  r-1: rename r -> x
+  | |
+  o |  b-1: b update
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBRm-0")'
+  M b
+  A d
+    a
+  A t
+    r
+  R a
+  R p
+  R r
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mRBm-0")'
+  M b
+  A d
+    a
+  A t
+    r
+  R a
+  R p
+  R r
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBRm-0")'
+  M t
+    r (no-filelog !)
+  R r
+  $ hg status --copies --rev 'desc("r-2")' --rev 'desc("mBRm-0")'
+  M b
+  $ hg status --copies --rev 'desc("r-1")' --rev 'desc("mBRm-0")'
+  M b
+  M t
+    x (no-filelog !)
+  R x
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mRBm-0")'
+  M t
+    r (no-filelog !)
+  R r
+  $ hg status --copies --rev 'desc("r-2")' --rev 'desc("mRBm-0")'
+  M b
+  $ hg status --copies --rev 'desc("r-1")' --rev 'desc("mRBm-0")'
+  M b
+  M t
+    x (no-filelog !)
+  R x
+
+#if no-changeset
+  $ hg log -Gfr 'desc("mBRm-0")' d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+  $ hg log -Gfr 'desc("mBRm-0")' d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  ~
+#endif
+
+#if no-changeset
+  $ hg log -Gfr 'desc("mRBm-0")' d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+  $ hg log -Gfr 'desc("mRBm-0")' d
+  o  i-2: c -move-> d, s -move-> t
+  |
+  ~
+#endif
+
+Subcase: reset of the copy history on one side
+``````````````````````````````````````````````
+
 Merge:
 - one with change to a file
 - one deleting and recreating the file
@@ -1370,21 +2639,21 @@
 consider history and rename on both branch of the merge.
 
   $ hg log -G --rev '::(desc("mDGm")+desc("mGDm"))'
-  o    27 mGDm-0 simple merge - the other way
+  o    mGDm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - the other way
   |\
-  +---o  26 mDGm-0 simple merge - one way
+  +---o  mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   | |
-  o |  7 d-1 delete d
+  o |  d-1 delete d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 One side of the merge has a long history with renames. The other side of the
 merge points to a new file with a smaller history. Each side is "valid".
@@ -1395,11 +2664,17 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDGm-0")'
   A d
     a (filelog !)
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGDm-0")'
   A d
     a
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mDGm-0")'
   M d
   $ hg status --copies --rev 'desc("d-2")' --rev 'desc("mGDm-0")'
@@ -1411,28 +2686,28 @@
 
 #if no-changeset
   $ hg log -Gfr 'desc("mDGm-0")' d
-  o    26 mDGm-0 simple merge - one way
+  o    mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mDGm-0")' d
-  o    26 mDGm-0 simple merge - one way
+  o    mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
@@ -1440,32 +2715,34 @@
 
 #if no-changeset
   $ hg log -Gfr 'desc("mDGm-0")' d
-  o    26 mDGm-0 simple merge - one way
+  o    mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mDGm-0")' d
-  o    26 mDGm-0 simple merge - one way
+  o    mDGm-0 actual content merge, copies on one side - D side: delete and re-add (different content), G side: update content - one way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  8 d-2 re-add d
+  o |  d-2 re-add d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
 
+Subcase: merging a change to a file with a "copy overwrite" to that file from another branch
+````````````````````````````````````````````````````````````````````````````````````````````
 
 Merge:
 - one with change to a file (d)
@@ -1476,21 +2753,21 @@
 
 
   $ hg log -G --rev '::(desc("mGFm")+desc("mFGm"))'
-  o    29 mGFm-0 simple merge - the other way
+  o    mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
   |\
-  +---o  28 mFGm-0 simple merge - one way
+  +---o  mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  22 f-2: rename i -> d
+  o |  f-2: rename i -> d
   | |
-  o |  21 f-1: rename h -> i
+  o |  f-1: rename h -> i
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 Note:
@@ -1504,15 +2781,15 @@
 Details on this hash ordering pick:
 
   $ hg manifest --debug 'desc("g-1")' | egrep 'd$'
-  f2b277c39e0d2bbac99d8aae075c0d8b5304d266 644   d (no-changeset !)
-  4ff57b4e8dceedb487e70e6965ea188a7c042cca 644   d (changeset !)
+  17ec97e605773eb44a117d1136b3849bcdc1924f 644   d (no-changeset !)
+  5cce88bf349f7c742bb440f2c53f81db9c294279 644   d (changeset !)
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("g-1")' d
   A d
     a (no-changeset no-compatibility !)
 
   $ hg manifest --debug 'desc("f-2")' | egrep 'd$'
-  4a067cf8965d1bfff130057ade26b44f580231be 644   d (no-changeset !)
-  fe6f8b4f507fe3eb524c527192a84920a4288dac 644   d (changeset !)
+  7b79e2fe0c8924e0e598a82f048a7b024afa4d96 644   d (no-changeset !)
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   d (changeset !)
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("f-2")' d
   A d
     h (no-changeset no-compatibility !)
@@ -1521,15 +2798,22 @@
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
   A d
-    h
+    h (no-filelog !)
+    a (filelog !)
+  A t
+    p
   R a
   R h
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
   A d
-    a (no-filelog !)
-    h (filelog !)
+    a (no-changeset !)
+    h (changeset !)
+  A t
+    p
   R a
   R h
+  R p
   $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFGm-0")'
   M d
   $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mGFm-0")'
@@ -1543,74 +2827,194 @@
     i (no-filelog !)
   R i
   $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mFGm-0")'
-  M d
-    h (no-filelog !)
+  M d (no-changeset !)
+    h (no-filelog no-changeset !)
   R h
   $ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGFm-0")'
-  M d
-    h (no-filelog !)
+  M d (no-changeset !)
+    h (no-filelog no-changeset !)
   R h
 
 #if no-changeset
   $ hg log -Gfr 'desc("mFGm-0")' d
-  o    28 mFGm-0 simple merge - one way
+  o    mFGm-0 merge - G side: content change, F side: copy overwrite, no content change - one way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  22 f-2: rename i -> d
+  o |  f-2: rename i -> d
   | |
-  o |  21 f-1: rename h -> i
+  o |  f-1: rename h -> i
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mFGm-0")' d
-  o    28 mFGm-0 simple merge - one way
-  |\
-  | o  25 g-1: update d
-  | |
-  o |  22 f-2: rename i -> d
-  |/
-  o  2 i-2: c -move-> d
+  o  g-1: update d
+  |
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
 
 #if no-changeset
   $ hg log -Gfr 'desc("mGFm-0")' d
-  o    29 mGFm-0 simple merge - the other way
+  o    mGFm-0 merge - G side: content change, F side: copy overwrite, no content change - the other way
   |\
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  22 f-2: rename i -> d
+  o |  f-2: rename i -> d
   | |
-  o |  21 f-1: rename h -> i
+  o |  f-1: rename h -> i
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 #else
 BROKEN: `hg log --follow <file>` relies on filelog metadata to work
   $ hg log -Gfr 'desc("mGFm-0")' d
-  o    29 mGFm-0 simple merge - the other way
-  |\
-  | o  25 g-1: update d
-  | |
-  o |  22 f-2: rename i -> d
-  |/
-  o  2 i-2: c -move-> d
+  o  g-1: update d
+  |
+  o  i-2: c -move-> d, s -move-> t
   |
   ~
 #endif
 
+Subcase: new copy information on both sides with an actual merge happening
+`````````````````````````````````````````````````````````````````````````
+
+- the "p-" branch renaming 't' to 'v' (through 'u')
+- the "q-" branch renaming 'r' to 'v' (through 'w')
+
+
+  $ hg log -G --rev '::(desc("mPQm")+desc("mQPm"))'
+  o    mQPm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - the other way
+  |\
+  +---o  mPQm-0 merge with copies info on both side - P side: rename t to v, Q side: r to v, (different content) - one way
+  | |/
+  | o  q-2 w -move-> v
+  | |
+  | o  q-1 r -move-> w
+  | |
+  o |  p-2: u -move-> v
+  | |
+  o |  p-1: t -move-> u
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+
+#if no-changeset
+  $ hg manifest --debug --rev 'desc("mPQm-0")' | grep '644   v'
+  0946c662ef16e4e67397fd717389eb6693d41749 644   v
+  $ hg manifest --debug --rev 'desc("mQPm-0")' | grep '644   v'
+  0db3aad7fcc1ec27fab57060e327b9e864ea0cc9 644   v
+  $ hg manifest --debug --rev 'desc("p-2")' | grep '644   v'
+  3f91841cd75cadc9a1f1b4e7c1aa6d411f76032e 644   v
+  $ hg manifest --debug --rev 'desc("q-2")' | grep '644   v'
+  c43c088b811fd27983c0a9aadf44f3343cd4cd7e 644   v
+  $ hg debugindex v | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * 3f91841cd75c 000000000000 000000000000
+       1       * c43c088b811f 000000000000 000000000000
+       2       * 0946c662ef16 3f91841cd75c c43c088b811f
+       3       * 0db3aad7fcc1 c43c088b811f 3f91841cd75c
+#else
+  $ hg manifest --debug --rev 'desc("mPQm-0")' | grep '644   v'
+  65fde9f6e4d4da23b3f610e07b53673ea9541d75 644   v
+  $ hg manifest --debug --rev 'desc("mQPm-0")' | grep '644   v'
+  a098dda6413aecf154eefc976afc38b295acb7e5 644   v
+  $ hg manifest --debug --rev 'desc("p-2")' | grep '644   v'
+  5aed6a8dbff0301328c08360d24354d3d064cf0d 644   v
+  $ hg manifest --debug --rev 'desc("q-2")' | grep '644   v'
+  a38b2fa170219750dac9bc7d19df831f213ba708 644   v
+  $ hg debugindex v | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * 5aed6a8dbff0 000000000000 000000000000
+       1       * a38b2fa17021 000000000000 000000000000
+       2       * 65fde9f6e4d4 5aed6a8dbff0 a38b2fa17021
+       3       * a098dda6413a a38b2fa17021 5aed6a8dbff0
+#endif
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides), while the changeset
+# based implementation works fine. The two therefore produce different output.
+
+  $ hg status --copies --rev 'desc("p-2")' --rev 'desc("mPQm-0")'
+  M v
+    r (no-filelog !)
+  R r
+  $ hg status --copies --rev 'desc("p-2")' --rev 'desc("mQPm-0")'
+  M v
+    r (no-filelog !)
+  R r
+  $ hg status --copies --rev 'desc("q-2")' --rev 'desc("mPQm-0")'
+  M v
+    t (no-filelog !)
+  R t
+  $ hg status --copies --rev 'desc("q-2")' --rev 'desc("mQPm-0")'
+  M v
+    t (no-filelog !)
+  R t
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("p-2")'
+  A v
+    t
+  R t
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("q-2")'
+  A v
+    r
+  R r
+
+# From here, we run status against revisions where both source files exist.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parent order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on
+# conflicting information, so its result depends on which parent is p1 and
+# which is p2. That order is stable across repositories (data from p1
+# prevails).
+
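+# An illustrative sketch of that tie-break (copy candidates per parent kept
+# as plain dicts; an assumption for illustration, not the real code):
+#
+#   def merged_copy_candidates(p1_copies, p2_copies):
+#       merged = dict(p2_copies)
+#       merged.update(p1_copies)  # on conflict, data from p1 prevails
+#       return merged
+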
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mPQm-0")'
+  A v
+    t
+  R r
+  R t
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mQPm-0")'
+  A v
+    t (filelog !)
+    r (no-filelog !)
+  R r
+  R t
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm-0")'
+  A d
+    a
+  A v
+    r (filelog !)
+    p (no-filelog !)
+  R a
+  R p
+  R r
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQPm-0")'
+  A d
+    a
+  A v
+    r
+  R a
+  R p
+  R r
+
 
 Comparing with merging with a deletion (and keeping the file)
 -------------------------------------------------------------
@@ -1624,19 +3028,19 @@
 copy tracing chain.
 
   $ hg log -G --rev '::(desc("mCGm")+desc("mGCm"))'
-  o    31 mGCm-0
+  o    mGCm-0 merge updated/deleted - revive the file (updated content) - the other way
   |\
-  +---o  30 mCGm-0
+  +---o  mCGm-0 merge updated/deleted - revive the file (updated content) - one way
   | |/
-  | o  25 g-1: update d
+  | o  g-1: update d
   | |
-  o |  6 c-1 delete d
+  o |  c-1 delete d
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 'a' is the copy source of 'd'
@@ -1644,11 +3048,17 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCGm-0")'
   A d
     a (no-compatibility no-changeset !)
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGCm-0")'
   A d
     a (no-compatibility no-changeset !)
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCGm-0")'
   A d
   $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mGCm-0")'
@@ -1669,19 +3079,19 @@
 copy tracing chain.
 
   $ hg log -G --rev '::(desc("mCB-revert-m")+desc("mBC-revert-m"))'
-  o    33 mBC-revert-m-0
+  o    mBC-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - the other way
   |\
-  +---o  32 mCB-revert-m-0
+  +---o  mCB-revert-m-0 merge explicitely revive deleted file - B side: unrelated change, C side: delete d (restored by merge) - one way
   | |/
-  | o  6 c-1 delete d
+  | o  c-1 delete d
   | |
-  o |  5 b-1: b update
+  o |  b-1: b update
   |/
-  o  2 i-2: c -move-> d
+  o  i-2: c -move-> d, s -move-> t
   |
-  o  1 i-1: a -move-> c
+  o  i-1: a -move-> c, p -move-> s
   |
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
 'a' is the copy source of 'd'
@@ -1690,12 +3100,18 @@
   M b
   A d
     a (no-compatibility no-changeset !)
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")'
   M b
   A d
     a (no-compatibility no-changeset !)
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCB-revert-m-0")'
   M b
   A d
@@ -1715,31 +3131,37 @@
 (the copy information from the branch that was not deleted should win).
 
   $ hg log -G --rev '::(desc("mCH-delete-before-conflict-m")+desc("mHC-delete-before-conflict-m"))'
-  o    36 mHC-delete-before-conflict-m-0
+  o    mHC-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - the other way
   |\
-  +---o  35 mCH-delete-before-conflict-m-0
+  +---o  mCH-delete-before-conflict-m-0 simple merge - C side: d is the results of renames then deleted, H side: d is result of another rename (same content as the other branch) - one way
   | |/
-  | o  34 h-1: b -(move)-> d
+  | o  h-1: b -(move)-> d
   | |
-  o |  6 c-1 delete d
+  o |  c-1 delete d
   | |
-  o |  2 i-2: c -move-> d
+  o |  i-2: c -move-> d, s -move-> t
   | |
-  o |  1 i-1: a -move-> c
+  o |  i-1: a -move-> c, p -move-> s
   |/
-  o  0 i-0 initial commit: a b h
+  o  i-0 initial commit: a b h p q r
   
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCH-delete-before-conflict-m")'
   A d
     b (no-compatibility no-changeset !)
+  A t
+    p
   R a
   R b
+  R p
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mHC-delete-before-conflict-m")'
   A d
     b
+  A t
+    p
   R a
   R b
+  R p
   $ hg status --copies --rev 'desc("c-1")' --rev 'desc("mCH-delete-before-conflict-m")'
   A d
     b
@@ -1749,6 +3171,541 @@
     b
   R b
   $ hg status --copies --rev 'desc("h-1")' --rev 'desc("mCH-delete-before-conflict-m")'
+  A t
+    p
   R a
+  R p
   $ hg status --copies --rev 'desc("h-1")' --rev 'desc("mHC-delete-before-conflict-m")'
+  A t
+    p
   R a
+  R p
+
+Variant of the previous cases with extra changes introduced by the merge
+--------------------------------------------------------------------------
+
+(see case declaration for details)
+
+Subcase: merge has the same initial content on both sides, but the merge introduced a change
+``````````````````````````````````````````````````````````````````````````````````````````````
+
+- the "e-" branch renaming b to f (through 'g')
+- the "a-" branch renaming d to f (through e)
+- the merge adds a new change to b
+
+  $ hg log -G --rev '::(desc("mAE-change-m")+desc("mEA-change-m"))'
+  o    mEA-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - the other way
+  |\
+  +---o  mAE-change-m-0 merge with file update and copies info on both side - A side: rename d to f, E side: b to f, (same content for f in parent) - one way
+  | |/
+  | o  e-2 g -move-> f
+  | |
+  | o  e-1 b -move-> g
+  | |
+  o |  a-2: e -move-> f
+  | |
+  o |  a-1: d -move-> e
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+#if no-changeset
+  $ hg manifest --debug --rev 'desc("mAE-change-m-0")' | grep '644   f'
+  2f649fba7eb284e720d02b61f0546fcef694c045 644   f
+  $ hg manifest --debug --rev 'desc("mEA-change-m-0")' | grep '644   f'
+  774e7c1637d536b99e2d8ef16fd731f87a82bd09 644   f
+  $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
+  b76eb76580df486c3d51d63c5c210d4dd43a8ac7 644   f
+  $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
+  e8825b386367b29fec957283a80bb47b47483fe1 644   f
+  $ hg debugindex f | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * b76eb76580df 000000000000 000000000000
+       1       * e8825b386367 000000000000 000000000000
+       2       * 2ff93c643948 b76eb76580df e8825b386367
+       3       * 2f649fba7eb2 b76eb76580df e8825b386367
+       4       * 774e7c1637d5 e8825b386367 b76eb76580df
+#else
+  $ hg manifest --debug --rev 'desc("mAE-change-m-0")' | grep '644   f'
+  d3613c1ec8310a812ac4268fd853ac576b6caea5 644   f
+  $ hg manifest --debug --rev 'desc("mEA-change-m-0")' | grep '644   f'
+  05e03c868bbcab4a649cb33a238d7aa07398a469 644   f
+  $ hg manifest --debug --rev 'desc("a-2")' | grep '644   f'
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
+  $ hg manifest --debug --rev 'desc("e-2")' | grep '644   f'
+  ae258f702dfeca05bf9b6a22a97a4b5645570f11 644   f
+  $ hg debugindex f | ../no-linkrev
+     rev linkrev nodeid       p1           p2
+       0       * ae258f702dfe 000000000000 000000000000
+       1       * d3613c1ec831 ae258f702dfe 000000000000
+       2       * 05e03c868bbc ae258f702dfe 000000000000
+#endif
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides), while the changeset
+# based implementation works fine. The two therefore produce different output.
+
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAE-change-m-0")'
+  M f
+    b (no-filelog !)
+  R b
+  $ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEA-change-m-0")'
+  M f
+    b (no-filelog !)
+  R b
+  $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAE-change-m-0")'
+  M f
+    d (no-filelog !)
+  R d
+  $ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEA-change-m-0")'
+  M f
+    d (no-filelog !)
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
+  A f
+    d
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("e-2")'
+  A f
+    b
+  R b
+
+# From here, we run status against revisions where both source files exist.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parent order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on
+# conflicting information, so its result depends on which parent is p1 and
+# which is p2. That order is stable across repositories (data from p1
+# prevails).
+
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAE-change-m-0")'
+  A f
+    d
+  R b
+  R d
+  $ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEA-change-m-0")'
+  A f
+    d (filelog !)
+    b (no-filelog !)
+  R b
+  R d
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m-0")'
+  A f
+    a
+  A t
+    p
+  R a
+  R b
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m-0")'
+  A f
+    a (filelog !)
+    b (no-filelog !)
+  A t
+    p
+  R a
+  R b
+  R p
+
+
+Subcase: merge overwrites common copy information, but with an extra change during the merge
+``````````````````````````````````````````````````````````````````````````````````````````````
+
+Merge:
+- one with change to an unrelated file (b)
+- one overwriting a file (d) with a rename (from h to i to d)
+
+  $ hg log -G --rev '::(desc("mBF-change-m")+desc("mFB-change-m"))'
+  o    mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  |\
+  +---o  mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  | |/
+  | o  f-2: rename i -> d
+  | |
+  | o  f-1: rename h -> i
+  | |
+  o |  b-1: b update
+  |/
+  o  i-2: c -move-> d, s -move-> t
+  |
+  o  i-1: a -move-> c, p -move-> s
+  |
+  o  i-0 initial commit: a b h p q r
+  
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBF-change-m-0")'
+  M b
+  A d
+    h (filelog !)
+    h (sidedata !)
+    a (upgraded known-bad-output !)
+    h (upgraded missing-correct-output !)
+    a (upgraded-parallel known-bad-output !)
+    h (upgraded-parallel missing-correct-output !)
+    h (changeset !)
+    h (compatibility !)
+  A t
+    p
+  R a
+  R h
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFB-change-m-0")'
+  M b
+  A d
+    h (filelog missing-correct-output !)
+    a (filelog known-bad-output !)
+    h (sidedata !)
+    h (upgraded !)
+    h (upgraded-parallel !)
+    h (changeset !)
+    h (compatibility !)
+  A t
+    p
+  R a
+  R h
+  R p
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBF-change-m-0")'
+  M d
+    h (no-filelog !)
+  R h
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBF-change-m-0")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBF-change-m-0")'
+  M b
+  M d
+    i (no-filelog !)
+  R i
+  $ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFB-change-m-0")'
+  M d
+    h (no-filelog !)
+  R h
+  $ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFB-change-m-0")'
+  M b
+  M d
+  $ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFB-change-m-0")'
+  M b
+  M d
+    i (no-filelog !)
+  R i
+
+#if no-changeset
+  $ hg log -Gfr 'desc("mBF-change-m-0")' d
+  o    mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  |\
+  o :  f-2: rename i -> d
+  | :
+  o :  f-1: rename h -> i
+  :/
+  o  i-0 initial commit: a b h p q r
+  
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+  $ hg log -Gfr 'desc("mBF-change-m-0")' d
+  o  mBF-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - one way
+  :
+  o  i-2: c -move-> d, s -move-> t
+  |
+  ~
+#endif
+
+#if no-changeset
+  $ hg log -Gfr 'desc("mFB-change-m-0")' d
+  o    mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  |\
+  o :  f-2: rename i -> d
+  | :
+  o :  f-1: rename h -> i
+  :/
+  o  i-2: c -move-> d, s -move-> t (known-bad-output !)
+  | (known-bad-output !)
+  o  i-1: a -move-> c, p -move-> s (known-bad-output !)
+  | (known-bad-output !)
+  o  i-0 initial commit: a b h p q r
+  
+#else
+BROKEN: `hg log --follow <file>` relies on filelog metadata to work
+  $ hg log -Gfr 'desc("mFB-change-m-0")' d
+  o  mFB-change-m-0 merge with extra change - B side: unrelated change, F side: overwrite d with a copy (from h->i->d) - the other way
+  :
+  o  i-2: c -move-> d, s -move-> t
+  |
+  ~
+#endif
+
+
+Decisions from previous merges are properly chained with later merges
+-----------------------------------------------------------------------
+
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mAEm" and "mEAm" cases create a rename tracking conflict on file 'f'. We
+add more changes on the respective branches and merge again. These second
+merges do not involve the file 'f', and the arbitration done within "mAEm"
+and "mEAm" about that file should stay unchanged.
+
+The result from mAEm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm")' f
+  A f
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE,Km")' f
+  A f
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AEm")' f
+  A f
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+
+The result from mEAm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA,Jm")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EAm")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
+
+Subcase: chaining conflicting rename resolution
+```````````````````````````````````````````````
+
+The "mPQm" and "mQPm" cases create a rename tracking conflict on file 'v'. We
+add more changes on the respective branches and merge again. These second
+merges do not involve the file 'v', and the arbitration done within "mPQm"
+and "mQPm" about that file should stay unchanged.
+
+The result from mPQm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm")' v
+  A v
+    r (filelog !)
+    p (sidedata !)
+    p (upgraded !)
+    p (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQ,Tm")' v
+  A v
+    r (filelog !)
+    p (sidedata !)
+    p (upgraded !)
+    p (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mT,PQm")' v
+  A v
+    r (filelog !)
+    p (sidedata !)
+    p (upgraded !)
+    p (upgraded-parallel !)
+
+
+The result from mQPm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQPm")' v
+  A v
+    r (no-changeset no-compatibility !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mQP,Sm")' v
+  A v
+    r (no-changeset no-compatibility !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mS,QPm")' v
+  A v
+    r (filelog !)
+    r (sidedata !)
+    r (upgraded !)
+    r (upgraded-parallel !)
+
+
+Subcase: chaining salvage information during a merge
+````````````````````````````````````````````````````
+
+We add more changes on the branch where the file was deleted. Merging again
+should preserve the fact that the file was salvaged.
+
+reference output:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB-revert-m-0")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  R a
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC-revert-m-0")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  R a
+  R p
+
+chained output:
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBC+revert,Lm")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  A unrelated-l
+  R a
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mCB+revert,Lm")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  A unrelated-l
+  R a
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mL,BC+revertm")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  A unrelated-l
+  R a
+  R p
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mL,CB+revertm")'
+  M b
+  A d
+    a (no-changeset no-compatibility !)
+  A t
+    p
+  A unrelated-l
+  R a
+  R p
+
+Subcase: chaining "merged" information during a merge
+``````````````````````````````````````````````````````
+
+When a non-rename change is merged with a copy overwrite, the merge picks the
+copy source from p1 as the reference. We should preserve this information in
+subsequent merges.
+
+
+reference output:
+
+ (for details about the filelog pick, check the mFGm/mGFm case)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm")' d
+  A d
+    a (filelog !)
+    h (sidedata !)
+    h (upgraded !)
+    h (upgraded-parallel !)
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm")' d
+  A d
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+Chained output
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mO,FGm")' d
+  A d
+    a (filelog !)
+    h (sidedata !)
+    h (upgraded !)
+    h (upgraded-parallel !)
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFG,Om")' d
+  A d
+    a (filelog !)
+    h (sidedata !)
+    h (upgraded !)
+    h (upgraded-parallel !)
+
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGF,Nm")' d
+  A d
+    a (no-changeset no-compatibility !)
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mN,GFm")' d
+  A d
+    a (no-changeset no-compatibility !)
+
+
+Subcase: chaining conflicting rename resolution, with an extra change during the merge
+````````````````````````````````````````````````````````````````````````````````````````
+
+The "mAEm" and "mEAm" cases create a rename tracking conflict on file 'f'. We
+add more changes on the respective branches and merge again. These second
+merges do not involve the file 'f', and the arbitration done within "mAEm"
+and "mEAm" about that file should stay unchanged.
+
+The result from mAEm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m")' f
+  A f
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change,Km")' f
+  A f
+    a (filelog !)
+    a (sidedata !)
+    a (upgraded !)
+    a (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AE-change-m")' f
+  A f
+    a (no-changeset no-compatibility !)
+
+
+The result from mEAm is the same for the subsequent merge:
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change,Jm")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
+
+  $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EA-change-m")' f
+  A f
+    a (filelog !)
+    b (sidedata !)
+    b (upgraded !)
+    b (upgraded-parallel !)
--- a/tests/test-copies-in-changeset.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-copies-in-changeset.t	Thu Mar 18 18:24:59 2021 -0400
@@ -39,9 +39,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:         yes    yes      no
+  revlog-v2:          yes    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -53,9 +53,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -345,7 +345,10 @@
   $ hg co -q 0
   $ hg mv a b
   $ hg ci -qm 'rename a to b'
-  $ hg rebase -d 1 --config rebase.experimental.inmemory=yes
+Not only do we want this to run in-memory, it also shouldn't fall back to an
+on-disk merge (there are no conflicts here), so we force it to stay in-memory
+with no fallback.
+  $ hg rebase -d 1 --config rebase.experimental.inmemory=yes --config devel.rebase.force-in-memory-merge=yes
   rebasing 2:* tip "rename a to b" (glob)
   merging a and b to b
   saved backup bundle to $TESTTMP/rebase-rename/.hg/strip-backup/*-*-rebase.hg (glob)
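+
+For reference, the two --config flags used above correspond to these hgrc
+entries (the same options written as a configuration file):
+
+  [rebase]
+  experimental.inmemory = yes
+
+  [devel]
+  rebase.force-in-memory-merge = yes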
@@ -421,9 +424,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:         yes    yes      no
+  revlog-v2:          yes    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -447,9 +450,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:          yes    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -475,9 +478,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:         yes    yes      no
+  revlog-v2:          yes    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
--- a/tests/test-copies.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-copies.t	Thu Mar 18 18:24:59 2021 -0400
@@ -93,8 +93,10 @@
      x y
   $ hg debugp1copies -r 1
   x -> y
-Incorrectly doesn't show the rename
   $ hg debugpathcopies 0 1
+  x -> y (no-filelog !)
+  $ hg debugpathcopies 0 1  --config devel.copy-tracing.trace-all-files=yes
+  x -> y
 
 Copy a file onto another file with same content. If metadata is stored in changeset, this does not
 produce a new filelog entry. The changeset's "files" entry should still list the file.
@@ -111,8 +113,10 @@
      x x2
   $ hg debugp1copies -r 1
   x -> x2
-Incorrectly doesn't show the rename
   $ hg debugpathcopies 0 1
+  x -> x2 (no-filelog !)
+  $ hg debugpathcopies 0 1  --config devel.copy-tracing.trace-all-files=yes
+  x -> x2
 
 Rename file in a loop: x->y->z->x
   $ newrepo
@@ -374,6 +378,29 @@
   $ hg debugpathcopies 1 3
   x -> z
 
+Copy x->y on two separate branches. Pathcopies from one branch to the other
+should not report the copy.
+  $ newrepo
+  $ echo x > x
+  $ hg ci -Aqm 'add x'
+  $ hg cp x y
+  $ hg ci -qm 'copy x to y'
+  $ hg co -q 0
+  $ hg graft 1 -q
+  $ hg l
+  @  2 copy x to y
+  |  y
+  | o  1 copy x to y
+  |/   y
+  o  0 add x
+     x
+  $ hg debugp1copies -r 1
+  x -> y
+  $ hg debugp1copies -r 2
+  x -> y
+  $ hg debugpathcopies 1 2
+  $ hg debugpathcopies 2 1
+
 Copy x to y on one side of merge, create y and rename to z on the other side.
   $ newrepo
   $ echo x > x
--- a/tests/test-copy.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-copy.t	Thu Mar 18 18:24:59 2021 -0400
@@ -228,6 +228,17 @@
 should show no copies
   $ hg st -C
 
+note: since filelog based copy tracing only traces copies for new files, the
+copy information here is not displayed.
+
+  $ hg status --copies --change .
+  M bar
+
+There is a devel option to walk all files and find this information anyway.
+
+  $ hg status --copies --change . --config devel.copy-tracing.trace-all-files=yes
+  M bar
+    foo
+
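+The same switch can be set in a configuration file instead of on the command
+line (equivalent to the --config form above):
+
+  [devel]
+  copy-tracing.trace-all-files = yes
+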
 copy --after on an added file
   $ cp bar baz
   $ hg add baz
--- a/tests/test-debugcommands.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-debugcommands.t	Thu Mar 18 18:24:59 2021 -0400
@@ -531,9 +531,17 @@
 
 Test WdirUnsupported exception
 
+#if no-rhg
   $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff
   abort: working directory revision cannot be specified
   [255]
+#else
+TODO: add rhg support for (at least parsing) the working directory pseudo-changeset
+  $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff
+  abort: working directory revision cannot be specified (missing-correct-output !)
+  abort: invalid revision identifier: ffffffffffffffffffffffffffffffffffffffff (known-bad-output !)
+  [255]
+#endif
 
 Test cache warming command
 
@@ -636,7 +644,6 @@
     remote-changegroup
       http
       https
-    rev-branch-cache
     stream
       v2
 
@@ -654,7 +661,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
--- a/tests/test-default-push.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-default-push.t	Thu Mar 18 18:24:59 2021 -0400
@@ -146,4 +146,40 @@
     ^ here)
   [10]
 
+the default :pushrev path sub-option is taken into account
+
+  $ echo babar > foo
+  $ hg ci -m 'extra commit'
+  $ hg up '.^'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo celeste > foo
+  $ hg ci -m 'extra other commit'
+  created new head
+  $ cat >> .hg/hgrc << EOF
+  > [paths]
+  > other = file://$WD/../pushurldest
+  > *:pushrev = .
+  > EOF
+  $ hg push other
+  pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  $ hg push file://$WD/../pushurldest
+  pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+  searching for changes
+  no changes found
+  [1]
+
+for comparison, pushing everything would give a different result
+
+  $ hg push file://$WD/../pushurldest --rev 'all()'
+  pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob)
+  searching for changes
+  abort: push creates new remote head 1616ce7cecc8
+  (merge or see 'hg help push' for details about pushing new heads)
+  [20]
+
   $ cd ..
--- a/tests/test-diff-change.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-diff-change.t	Thu Mar 18 18:24:59 2021 -0400
@@ -194,4 +194,105 @@
    9
    10
 
+merge diff should show only manual edits to a merge:
+
+  $ hg diff --config diff.merge=yes -c 6
+(no diff output is expected here)
+
+Construct an "evil merge" that does something other than just the merge.
+
+  $ hg co ".^"
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg merge -r 5
+  merging file.txt
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ echo 11 >> file.txt
+  $ hg ci -m 'merge 8 to y with manual edit of 11' # 7
+  created new head
+  $ hg diff -c 7
+  diff -r 273b50f17c6d -r 8ad85e839ba7 file.txt
+  --- a/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  @@ -6,6 +6,7 @@
+   5
+   6
+   7
+  -8
+  +y
+   9
+   10
+  +11
+Contrast with the `hg diff -c 7` version above: only the manual edit shows
+up, making it easy to identify changes someone is otherwise trying to sneak
+into a merge.
+  $ hg diff --config diff.merge=yes -c 7
+  diff -r 8ad85e839ba7 file.txt
+  --- a/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  @@ -9,3 +9,4 @@
+   y
+   9
+   10
+  +11
+
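+The --config flag above maps to this hgrc entry, for enabling merge diffs
+persistently:
+
+  [diff]
+  merge = yes
+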
+Set up a conflict.
+  $ hg co ".^"
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ sed -e 's,^8$,z,' file.txt > file.txt.tmp
+  $ mv file.txt.tmp file.txt
+  $ hg ci -m 'conflicting edit: 8 to z'
+  created new head
+  $ echo "this file is new in p1 of the merge" > new-file-p1.txt
+  $ hg ci -Am 'new file' new-file-p1.txt
+  $ hg log -r . --template 'p1 will be rev {rev}\n'
+  p1 will be rev 9
+  $ hg co 5
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo "this file is new in p2 of the merge" > new-file-p2.txt
+  $ hg ci -Am 'new file' new-file-p2.txt
+  created new head
+  $ hg log -r . --template 'p2 will be rev {rev}\n'
+  p2 will be rev 10
+  $ hg co -- 9
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge -r 10
+  merging file.txt
+  warning: conflicts while merging file.txt! (edit, then use 'hg resolve --mark')
+  1 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg revert file.txt -r .
+  $ hg resolve -ma
+  (no more unresolved files)
+  $ hg commit -m 'merge conflicted edit'
+Without diff.merge, it's a diff against p1
+  $ hg diff --config diff.merge=no -c 11
+  diff -r fd1f17c90d7c -r 5010caab09f6 new-file-p2.txt
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/new-file-p2.txt	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +this file is new in p2 of the merge
+With diff.merge, it's a diff against the conflicted content.
+  $ hg diff --config diff.merge=yes -c 11
+  diff -r 5010caab09f6 file.txt
+  --- a/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/file.txt	Thu Jan 01 00:00:00 1970 +0000
+  @@ -6,12 +6,6 @@
+   5
+   6
+   7
+  -<<<<<<< local: fd1f17c90d7c - test: new file
+   z
+  -||||||| base
+  -8
+  -=======
+  -y
+  ->>>>>>> other: d9e7de69eac3 - test: new file
+   9
+   10
+
+There must _NOT_ be a leftover .hg/merge directory.
+  $ test ! -d .hg/merge
+(No output is expected)
   $ cd ..
--- a/tests/test-dispatch.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-dispatch.t	Thu Mar 18 18:24:59 2021 -0400
@@ -90,9 +90,12 @@
 
   $ mkdir -p badrepo/.hg
   $ echo 'invalid-syntax' > badrepo/.hg/hgrc
+TODO: add rhg support for detailed exit codes
+#if no-rhg
   $ hg log -b -Rbadrepo default
   config error at badrepo/.hg/hgrc:1: invalid-syntax
   [30]
+#endif
 
   $ hg log -b --cwd=inexistent default
   abort: $ENOENT$: 'inexistent'
@@ -154,7 +157,7 @@
 
   $ HGPLAIN=+strictflags hg --config='hooks.pre-log=false' log -b default
   abort: pre-log hook exited with status 1
-  [255]
+  [40]
   $ HGPLAIN=+strictflags hg --cwd .. -q -Ra log -b default
   0:cb9a9f314b8b
   $ HGPLAIN=+strictflags hg --cwd .. -q --repository a log -b default
@@ -166,7 +169,7 @@
 
   $ HGPLAIN= hg log --config='hooks.pre-log=false' -b default
   abort: pre-log hook exited with status 1
-  [255]
+  [40]
   $ HGPLAINEXCEPT= hg log --cwd .. -q -Ra -b default
   0:cb9a9f314b8b
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-multi-source.t	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,518 @@
+======================================================
+Test push/pull from multiple sources at the same time
+======================================================
+
+
+Setup
+=====
+
+main repository
+---------------
+
+  $ . $RUNTESTDIR/testlib/common.sh
+  $ hg init main-repo
+  $ cd main-repo
+  $ mkcommit A
+  $ mkcommit B
+  $ mkcommit C
+  $ mkcommit D
+  $ mkcommit E
+  $ hg up 'desc(B)'
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ mkcommit F
+  created new head
+  $ mkcommit G
+  $ hg up 'desc(C)'
+  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit H
+  created new head
+  $ hg up null --quiet
+  $ hg log -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  | o  G 6
+  | |
+  | o  F 5
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ cd ..
+
+Various other repositories
+--------------------------
+
+  $ hg clone main-repo branch-E --rev 4 -U
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 5 changes to 5 files
+  new changesets 4a2df7238c3b:a603bfb5a83e
+  $ hg clone main-repo branch-G --rev 6 -U
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files
+  new changesets 4a2df7238c3b:c521a06b234b
+  $ hg clone main-repo branch-H --rev 7 -U
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files
+  new changesets 4a2df7238c3b:40faebb2ec45
+
+Test simple bare operation
+==========================
+
+pull
+----
+
+  $ hg clone main-repo test-repo-bare --rev 0 -U
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 4a2df7238c3b
+
+  $ hg pull -R test-repo-bare ./branch-E ./branch-G ./branch-H
+  pulling from ./branch-E
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files
+  new changesets 27547f69f254:a603bfb5a83e
+  (run 'hg update' to get a working copy)
+  pulling from ./branch-G
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  new changesets 2f3a4c5c1417:c521a06b234b
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  pulling from ./branch-H
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  new changesets 40faebb2ec45
+  (run 'hg heads .' to see heads, 'hg merge' to merge)
+  $ hg log -R test-repo-bare -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  | o  G 6
+  | |
+  | o  F 5
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+
+push
+----
+
+  $ cp -R ./branch-E ./branch-E-push
+  $ cp -R ./branch-G ./branch-G-push
+  $ cp -R ./branch-H ./branch-H-push
+  $ hg push --force -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push
+  pushing to ./branch-E-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 3 files (+2 heads)
+  pushing to ./branch-G-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files (+2 heads)
+  pushing to ./branch-H-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files (+2 heads)
+  $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  | o  G 6
+  | |
+  | o  F 5
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | o  E 6
+  | |
+  | o  D 5
+  |/
+  o  C 4
+  |
+  | o  G 3
+  | |
+  | o  F 2
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  G 7
+  |
+  o  F 6
+  |
+  | o  E 5
+  | |
+  | o  D 4
+  | |
+  | | o  H 3
+  | |/
+  | o  C 2
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ rm -rf ./*-push
+
+Test operation with a target
+============================
+
+pull
+----
+
+  $ hg clone main-repo test-repo-rev --rev 0 -U
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 4a2df7238c3b
+
+pulling an explicit revision
+
+  $ node_b=`hg log -R main-repo --rev 'desc(B)' -T '{node}'`
+  $ hg pull -R test-repo-rev ./branch-E ./branch-G ./branch-H --rev $node_b
+  pulling from ./branch-E
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 27547f69f254
+  (run 'hg update' to get a working copy)
+  pulling from ./branch-G
+  no changes found
+  pulling from ./branch-H
+  no changes found
+  $ hg log -R test-repo-rev -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  B 1
+  |
+  o  A 0
+  
+
+pulling a branch head; the branch head resolves to a different revision in
+each of the repositories.
+
+  $ hg pull -R test-repo-rev ./branch-E ./branch-G ./branch-H --rev default
+  pulling from ./branch-E
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 3 files
+  new changesets f838bfaca5c7:a603bfb5a83e
+  (run 'hg update' to get a working copy)
+  pulling from ./branch-G
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  new changesets 2f3a4c5c1417:c521a06b234b
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  pulling from ./branch-H
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  new changesets 40faebb2ec45
+  (run 'hg heads .' to see heads, 'hg merge' to merge)
+  $ hg log -R test-repo-rev -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  | o  G 6
+  | |
+  | o  F 5
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+
+push
+----
+
+We only push a specific branch with --rev
+
+  $ cp -R ./branch-E ./branch-E-push
+  $ cp -R ./branch-G ./branch-G-push
+  $ cp -R ./branch-H ./branch-H-push
+  $ hg push --force -R test-repo-bare ./branch-E-push ./branch-G-push ./branch-H-push --rev default
+  pushing to ./branch-E-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  pushing to ./branch-G-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  pushing to ./branch-H-push
+  searching for changes
+  no changes found
+  $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 5
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 5
+  |
+  o  C 4
+  |
+  | o  G 3
+  | |
+  | o  F 2
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 3
+  |
+  o  C 2
+  |
+  o  B 1
+  |
+  o  A 0
+  
+  $ rm -rf ./*-push
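+
+With --rev default, the pushed set is the ancestors of whatever "default"
+resolves to in the source repository (here H). An illustrative look at that
+set on the source side:
+
+  $ hg log -R test-repo-bare --rev 'ancestors(default)' -T '{desc}\n'
+  A
+  B
+  C
+  H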
+
+Same push, but in a different order, with the no-op destination in the middle
+
+  $ cp -R ./branch-E ./branch-E-push
+  $ cp -R ./branch-G ./branch-G-push
+  $ cp -R ./branch-H ./branch-H-push
+  $ hg push --force -R test-repo-bare ./branch-G-push ./branch-H-push ./branch-E-push --rev default
+  pushing to ./branch-G-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  pushing to ./branch-H-push
+  searching for changes
+  no changes found
+  pushing to ./branch-E-push
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  $ hg log -R ./branch-E-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 5
+  |
+  | o  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-G-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 5
+  |
+  o  C 4
+  |
+  | o  G 3
+  | |
+  | o  F 2
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+  $ hg log -R ./branch-H-push -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 3
+  |
+  o  C 2
+  |
+  o  B 1
+  |
+  o  A 0
+  
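+branch-H-push was the no-op destination: it already contained every ancestor
+of the pushed head. An illustrative check before the cleanup:
+
+  $ hg log -R ./branch-H-push --rev 'desc(H)' -T '{desc} {rev}\n'
+  H 3
+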
+  $ rm -rf ./*-push
+
+
+Test with --update
+==================
+
+update without conflicts
+------------------------
+
+  $ hg clone main-repo test-repo-update --rev 0
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 4a2df7238c3b
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+We update for each pull, so the first one lands us on a branch independent
+from the others, and we stay there. This is the expected behavior.
+
+  $ hg log -R test-repo-update -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  @  A 0
+  
+  $ hg pull -R test-repo-update ./branch-E ./branch-G ./branch-H --update
+  pulling from ./branch-E
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files
+  new changesets 27547f69f254:a603bfb5a83e
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  pulling from ./branch-G
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  new changesets 2f3a4c5c1417:c521a06b234b
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  updated to "a603bfb5a83e: E"
+  1 other heads for branch "default"
+  pulling from ./branch-H
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  new changesets 40faebb2ec45
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  updated to "a603bfb5a83e: E"
+  2 other heads for branch "default"
+  $ hg log -R test-repo-update -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  o  H 7
+  |
+  | @  E 4
+  | |
+  | o  D 3
+  |/
+  o  C 2
+  |
+  | o  G 6
+  | |
+  | o  F 5
+  |/
+  o  B 1
+  |
+  o  A 0
+  
+
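+The working copy parent therefore stays on E even as the later pulls add new
+heads. An illustrative check of the working copy parent:
+
+  $ hg log -R test-repo-update --rev . -T '{desc} {rev}\n'
+  E 4
+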
+update with conflicts
+---------------------
+
+  $ hg clone main-repo test-repo-conflict --rev 0
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 4a2df7238c3b
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The update has a conflict and interrupts the pull.
+
+  $ echo this-will-conflict > test-repo-conflict/D
+  $ hg add -R test-repo-conflict test-repo-conflict/D
+  $ hg log -R test-repo-conflict -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  @  A 0
+  
+  $ hg pull -R test-repo-conflict ./branch-E ./branch-G ./branch-H --update
+  pulling from ./branch-E
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 4 changes to 4 files
+  new changesets 27547f69f254:a603bfb5a83e
+  merging D
+  warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
+  3 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges
+  [1]
+  $ hg -R test-repo-conflict resolve -l
+  U D
+  $ hg log -R test-repo-conflict -T '{desc} {rev}\n' --rev 'sort(all(), "topo")' -G
+  @  E 4
+  |
+  o  D 3
+  |
+  o  C 2
+  |
+  o  B 1
+  |
+  %  A 0
+  
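+
+From this interrupted state the conflict is handled as usual; a minimal
+sketch of the continuation (rewrite D, then mark it resolved):
+
+  $ echo resolved > test-repo-conflict/D
+  $ hg resolve --mark -R test-repo-conflict test-repo-conflict/D
+  (no more unresolved files)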
--- a/tests/test-globalopts.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-globalopts.t	Thu Mar 18 18:24:59 2021 -0400
@@ -65,6 +65,8 @@
 
 -R with path aliases:
 
+TODO: add rhg support for path aliases
+#if no-rhg
   $ cd c
   $ hg -R default identify
   8580ff50825a tip
@@ -75,6 +77,7 @@
   $ HOME=`pwd`/../ hg -R relativetohome identify
   8580ff50825a tip
   $ cd ..
+#endif
 
 #if no-outer-repo
 
@@ -215,6 +218,8 @@
 
   $ hg --cwd c --config paths.quuxfoo=bar paths | grep quuxfoo > /dev/null && echo quuxfoo
   quuxfoo
+TODO: add rhg support for detailed exit codes
+#if no-rhg
   $ hg --cwd c --config '' tip -q
   abort: malformed --config option: '' (use --config section.name=value)
   [10]
@@ -230,6 +235,7 @@
   $ hg --cwd c --config .b= tip -q
   abort: malformed --config option: '.b=' (use --config section.name=value)
   [10]
+#endif
 
 Testing --debug:
 
@@ -264,7 +270,7 @@
 
 Testing --traceback:
 
-#if no-chg
+#if no-chg no-rhg
   $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
   Traceback (most recent call last):
   Traceback (most recent call last): (py3 !)
@@ -351,6 +357,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
@@ -483,6 +490,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
--- a/tests/test-graft.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-graft.t	Thu Mar 18 18:24:59 2021 -0400
@@ -223,10 +223,6 @@
   committing changelog
   updating the branch cache
   grafting 5:97f8bfe72746 "5"
-    all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     on local side:
-      src: 'c' -> dst: 'b' 
-    checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 4c60f11aa304, local: 6b9e5368ca4e+, remote: 97f8bfe72746
@@ -240,10 +236,6 @@
   $ HGEDITOR=cat hg graft 4 3 --log --debug
   scanning for duplicate grafts
   grafting 4:9c233e8e184d "4"
-    all copies found (* = to merge, ! = divergent, % = renamed and deleted):
-     on local side:
-      src: 'c' -> dst: 'b' 
-    checking for directory renames
   resolving manifests
    branchmerge: True, force: True, partial: False
    ancestor: 4c60f11aa304, local: 1905859650ec+, remote: 9c233e8e184d
--- a/tests/test-help-hide.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-help-hide.t	Thu Mar 18 18:24:59 2021 -0400
@@ -55,6 +55,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
@@ -191,6 +192,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
--- a/tests/test-help.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-help.t	Thu Mar 18 18:24:59 2021 -0400
@@ -107,6 +107,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
@@ -235,6 +236,7 @@
    addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
+   purge         removes files not tracked by Mercurial
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
@@ -375,8 +377,6 @@
        mq            manage a stack of patches
        notify        hooks for sending email push notifications
        patchbomb     command to send changesets as (a series of) patch emails
-       purge         command to delete untracked files from the working
-                     directory
        relink        recreates hardlinks between repository clones
        schemes       extend schemes with shortcuts to repository swarms
        share         share a common history between several working directories
@@ -1069,6 +1069,7 @@
    debugsetparents
                  manually set the parents of the current working directory
                  (DANGEROUS)
+   debugshell    run an interactive Python interpreter
    debugsidedata
                  dump the side data for a cl/manifest/file revision
    debugssl      test a secure connection to a server
@@ -2720,6 +2721,13 @@
   set or show the current phase name
   </td></tr>
   <tr><td>
+  <a href="/help/purge">
+  purge
+  </a>
+  </td><td>
+  removes files not tracked by Mercurial
+  </td></tr>
+  <tr><td>
   <a href="/help/recover">
   recover
   </a>
--- a/tests/test-hgrc.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hgrc.t	Thu Mar 18 18:24:59 2021 -0400
@@ -59,7 +59,7 @@
 #if unix-permissions no-root
   $ chmod u-r $TESTTMP/included
   $ hg showconfig section
-  config error at $TESTTMP/hgrc:2: cannot include $TESTTMP/included (Permission denied)
+  config error at $TESTTMP/hgrc:2: cannot include $TESTTMP/included (Permission denied*) (glob)
   [255]
 #endif
 
--- a/tests/test-hgweb-filelog.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hgweb-filelog.t	Thu Mar 18 18:24:59 2021 -0400
@@ -656,7 +656,7 @@
   An error occurred while processing your request:
   </p>
   <p>
-  a@6563da9dcf87: not found in manifest
+  a@6563da9dcf87b1949716e38ff3e3dfaa3198eb06: not found in manifest
   </p>
   </div>
   </div>
--- a/tests/test-hgweb-json.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hgweb-json.t	Thu Mar 18 18:24:59 2021 -0400
@@ -2190,6 +2190,10 @@
         "topic": "phase"
       },
       {
+        "summary": "removes files not tracked by Mercurial",
+        "topic": "purge"
+      },
+      {
         "summary": "roll back an interrupted transaction",
         "topic": "recover"
       },
--- a/tests/test-hgweb.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hgweb.t	Thu Mar 18 18:24:59 2021 -0400
@@ -149,7 +149,7 @@
   404 Not Found
   
   
-  error: bork@2ef0ac749a14: not found in manifest
+  error: bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
   [1]
   $ get-with-headers.py localhost:$HGPORT 'file/tip/bork'
   404 Not Found
@@ -202,7 +202,7 @@
   An error occurred while processing your request:
   </p>
   <p>
-  bork@2ef0ac749a14: not found in manifest
+  bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
   </p>
   </div>
   </div>
@@ -218,7 +218,7 @@
   404 Not Found
   
   
-  error: bork@2ef0ac749a14: not found in manifest
+  error: bork@2ef0ac749a14e4f57a5a822464a0902c6f7f448f: not found in manifest
   [1]
 
 try bad style
--- a/tests/test-hgwebdir.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hgwebdir.t	Thu Mar 18 18:24:59 2021 -0400
@@ -103,7 +103,7 @@
   404 Not Found
   
   
-  error: bork@8580ff50825a: not found in manifest
+  error: bork@8580ff50825a50c8f716709acdf8de0deddcd6ab: not found in manifest
   [1]
 
 should succeed
--- a/tests/test-histedit-edit.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-histedit-edit.t	Thu Mar 18 18:24:59 2021 -0400
@@ -375,7 +375,7 @@
   note: commit message saved in .hg/last-message.txt
   note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   f
   
@@ -400,7 +400,7 @@
   note: commit message saved in .hg/last-message.txt
   note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
 
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
--- a/tests/test-histedit-fold.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-histedit-fold.t	Thu Mar 18 18:24:59 2021 -0400
@@ -202,7 +202,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.abortfolding hook failed
-  [255]
+  [40]
 
   $ cat .hg/last-message.txt
   f
--- a/tests/test-hook.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-hook.t	Thu Mar 18 18:24:59 2021 -0400
@@ -227,7 +227,7 @@
   HG_PATS=[]
   
   abort: pre-identify hook exited with status 1
-  [255]
+  [40]
   $ hg cat b
   pre-cat hook: HG_ARGS=cat b
   HG_HOOKNAME=pre-cat
@@ -390,7 +390,7 @@
   HG_TAG=fa
   
   abort: pretag.forbid hook exited with status 1
-  [255]
+  [40]
   $ hg tag -l fla
   pretag hook: HG_HOOKNAME=pretag
   HG_HOOKTYPE=pretag
@@ -405,7 +405,7 @@
   HG_TAG=fla
   
   abort: pretag.forbid hook exited with status 1
-  [255]
+  [40]
 
 pretxncommit hook can see changeset, can roll back txn, changeset no
 more there after
@@ -451,7 +451,7 @@
   
   rollback completed
   abort: pretxncommit.forbid1 hook exited with status 1
-  [255]
+  [40]
   $ hg -q tip
   4:539e4b31b6dc
 
@@ -485,7 +485,7 @@
   HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
   
   abort: precommit.forbid hook exited with status 1
-  [255]
+  [40]
   $ hg -q tip
   4:539e4b31b6dc
 
@@ -644,7 +644,7 @@
   HG_URL=file:$TESTTMP/a
   
   abort: prepushkey hook exited with status 1
-  [255]
+  [40]
   $ cd ../a
 
 test that prelistkeys can prevent listing keys
@@ -679,7 +679,7 @@
   HG_NAMESPACE=bookmarks
   
   abort: prelistkeys hook exited with status 1
-  [255]
+  [40]
   $ cd ../a
   $ rm .hg/hgrc
 
@@ -704,7 +704,7 @@
   HG_URL=file:$TESTTMP/a
   
   abort: prechangegroup.forbid hook exited with status 1
-  [255]
+  [40]
 
 pretxnchangegroup hook can see incoming changes, can roll back txn,
 incoming changes no longer there after
@@ -735,7 +735,7 @@
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.forbid1 hook exited with status 1
-  [255]
+  [40]
   $ hg -q tip
   3:07f3376c1e65
 
@@ -786,7 +786,7 @@
   HG_SOURCE=pull
   
   abort: preoutgoing.forbid hook exited with status 1
-  [255]
+  [40]
 
 outgoing hooks work for local clones
 
@@ -825,7 +825,7 @@
   HG_SOURCE=clone
   
   abort: preoutgoing.forbid hook exited with status 1
-  [255]
+  [40]
 
   $ cd "$TESTTMP/b"
 
@@ -915,7 +915,7 @@
     hooktype preoutgoing
     source pull
   abort: preoutgoing.fail hook failed
-  [255]
+  [40]
 
   $ echo '[hooks]' > ../a/.hg/hgrc
   $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
@@ -1283,7 +1283,7 @@
   rollback completed
   strip failed, backup bundle stored in * (glob)
   abort: pretxnclose.error hook exited with status 1
-  [255]
+  [40]
   $ hg recover
   no interrupted transaction available
   [1]
@@ -1306,7 +1306,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
 
 (check (in)visibility of new changeset while transaction running in
@@ -1331,7 +1331,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
 
 Hook from untrusted hgrc are reported as failure
 ================================================
@@ -1382,7 +1382,7 @@
   rollback completed
   abort: untrusted hook pretxnclose.testing not executed
   (see 'hg help config.trusted')
-  [255]
+  [40]
   $ hg log
   changeset:   0:3903775176ed
   tag:         tip
--- a/tests/test-http-bad-server.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-http-bad-server.t	Thu Mar 18 18:24:59 2021 -0400
@@ -118,17 +118,17 @@
   readline(115 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
   readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(* from *) -> (2) \r\n (glob)
-  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
   write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
   write(23) -> Server: badhttpserver\r\n (no-py3 !)
   write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
   write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21) -> Content-Length: 450\r\n (no-py3 !)
+  write(21) -> Content-Length: 431\r\n (no-py3 !)
   write(2) -> \r\n (no-py3 !)
-  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1? from *) -> (1?) Accept-Encoding* (glob)
   read limit reached; closing socket
@@ -163,17 +163,17 @@
   readline(213 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
   readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(* from *) -> (2) \r\n (glob)
-  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
   write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
   write(23) -> Server: badhttpserver\r\n (no-py3 !)
   write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
   write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21) -> Content-Length: 450\r\n (no-py3 !)
+  write(21) -> Content-Length: 431\r\n (no-py3 !)
   write(2) -> \r\n (no-py3 !)
-  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(8? from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -228,17 +228,17 @@
   readline(234 from *) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(* from *) -> (2) \r\n (glob)
-  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 463\r\n\r\n (py36 !)
-  sendall(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 463\r\n\r\n (py3 no-py36 !)
-  write(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py36 !)
+  sendall(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py3 no-py36 !)
+  write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
   write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
   write(23) -> Server: badhttpserver\r\n (no-py3 !)
   write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
   write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21) -> Content-Length: 463\r\n (no-py3 !)
+  write(21) -> Content-Length: 444\r\n (no-py3 !)
   write(2) -> \r\n (no-py3 !)
-  write(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(1?? from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(1?? from *) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -296,7 +296,7 @@
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
   
-  write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
   write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
 
   $ rm -f error.log
@@ -307,7 +307,7 @@
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
-  abort: HTTP request error (incomplete response; expected 450 bytes got 20)
+  abort: HTTP request error (incomplete response; expected 431 bytes got 20)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -320,17 +320,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(20 from 450) -> (0) batch branchmap bund (py36 !)
-  write(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(20 from 450) -> (0) batch branchmap bund (py3 no-py36 !)
+  sendall(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(20 from 431) -> (0) batch branchmap bund (py36 !)
+  write(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(20 from 431) -> (0) batch branchmap bund (py3 no-py36 !)
   write(36 from 36) -> (144) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
   write(23 from 23) -> (121) Server: badhttpserver\r\n (no-py3 !)
   write(37 from 37) -> (84) Date: $HTTP_DATE$\r\n (no-py3 !)
   write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (22) Content-Length: 450\r\n (no-py3 !)
+  write(21 from 21) -> (22) Content-Length: 431\r\n (no-py3 !)
   write(2 from 2) -> (20) \r\n (no-py3 !)
-  write(20 from 450) -> (0) batch branchmap bund (no-py3 !)
+  write(20 from 431) -> (0) batch branchmap bund (no-py3 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
   Traceback (most recent call last):
@@ -341,7 +341,7 @@
 
 Server sends incomplete headers for batch request
 
-  $ hg serve --config badserver.closeaftersendbytes=728 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=709 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO this output is horrible
@@ -363,17 +363,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
-  write(36 from 36) -> (692) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
-  write(23 from 23) -> (669) Server: badhttpserver\r\n (no-py3 !)
-  write(37 from 37) -> (632) Date: $HTTP_DATE$\r\n (no-py3 !)
-  write(41 from 41) -> (591) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (570) Content-Length: 450\r\n (no-py3 !)
-  write(2 from 2) -> (568) \r\n (no-py3 !)
-  write(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  sendall(160 from 160) -> (549) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  write(36 from 36) -> (673) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+  write(23 from 23) -> (650) Server: badhttpserver\r\n (no-py3 !)
+  write(37 from 37) -> (613) Date: $HTTP_DATE$\r\n (no-py3 !)
+  write(41 from 41) -> (572) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+  write(21 from 21) -> (551) Content-Length: 431\r\n (no-py3 !)
+  write(2 from 2) -> (549) \r\n (no-py3 !)
+  write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -401,7 +401,7 @@
 
 Server sends an incomplete HTTP response body to batch request
 
-  $ hg serve --config badserver.closeaftersendbytes=793 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=774 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO client spews a stack due to uncaught ValueError in batch.results()
@@ -422,17 +422,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
-  write(36 from 36) -> (757) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
-  write(23 from 23) -> (734) Server: badhttpserver\r\n (no-py3 !)
-  write(37 from 37) -> (697) Date: $HTTP_DATE$\r\n (no-py3 !)
-  write(41 from 41) -> (656) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (635) Content-Length: 450\r\n (no-py3 !)
-  write(2 from 2) -> (633) \r\n (no-py3 !)
-  write(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  sendall(160 from 160) -> (614) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  write(36 from 36) -> (738) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+  write(23 from 23) -> (715) Server: badhttpserver\r\n (no-py3 !)
+  write(37 from 37) -> (678) Date: $HTTP_DATE$\r\n (no-py3 !)
+  write(41 from 41) -> (637) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+  write(21 from 21) -> (616) Content-Length: 431\r\n (no-py3 !)
+  write(2 from 2) -> (614) \r\n (no-py3 !)
+  write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -463,7 +463,7 @@
 
 Server sends incomplete headers for getbundle response
 
-  $ hg serve --config badserver.closeaftersendbytes=940 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=921 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO this output is terrible
@@ -486,17 +486,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
-  write(36 from 36) -> (904) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
-  write(23 from 23) -> (881) Server: badhttpserver\r\n (no-py3 !)
-  write(37 from 37) -> (844) Date: $HTTP_DATE$\r\n (no-py3 !)
-  write(41 from 41) -> (803) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (782) Content-Length: 450\r\n (no-py3 !)
-  write(2 from 2) -> (780) \r\n (no-py3 !)
-  write(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  sendall(160 from 160) -> (761) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  write(36 from 36) -> (885) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+  write(23 from 23) -> (862) Server: badhttpserver\r\n (no-py3 !)
+  write(37 from 37) -> (825) Date: $HTTP_DATE$\r\n (no-py3 !)
+  write(41 from 41) -> (784) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+  write(21 from 21) -> (763) Content-Length: 431\r\n (no-py3 !)
+  write(2 from 2) -> (761) \r\n (no-py3 !)
+  write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -520,7 +520,7 @@
   readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
-  readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+  readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
   readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
   readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -544,7 +544,7 @@
 
 Server stops before it sends transfer encoding
 
-  $ hg serve --config badserver.closeaftersendbytes=973 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=954 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -573,7 +573,7 @@
 
 Server sends empty HTTP body for getbundle
 
-  $ hg serve --config badserver.closeaftersendbytes=978 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=959 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -591,17 +591,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
-  write(36 from 36) -> (942) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
-  write(23 from 23) -> (919) Server: badhttpserver\r\n (no-py3 !)
-  write(37 from 37) -> (882) Date: $HTTP_DATE$\r\n (no-py3 !)
-  write(41 from 41) -> (841) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (820) Content-Length: 450\r\n (no-py3 !)
-  write(2 from 2) -> (818) \r\n (no-py3 !)
-  write(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  sendall(160 from 160) -> (799) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  write(36 from 36) -> (923) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+  write(23 from 23) -> (900) Server: badhttpserver\r\n (no-py3 !)
+  write(37 from 37) -> (863) Date: $HTTP_DATE$\r\n (no-py3 !)
+  write(41 from 41) -> (822) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+  write(21 from 21) -> (801) Content-Length: 431\r\n (no-py3 !)
+  write(2 from 2) -> (799) \r\n (no-py3 !)
+  write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -625,7 +625,7 @@
   readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
-  readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+  readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
   readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
   readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -651,7 +651,7 @@
 
 Server sends partial compression string
 
-  $ hg serve --config badserver.closeaftersendbytes=1002 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=983 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -669,17 +669,17 @@
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
-  sendall(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py36 !)
-  sendall(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
-  write(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 450\r\n\r\n (py3 no-py36 !)
-  write(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
-  write(36 from 36) -> (966) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
-  write(23 from 23) -> (943) Server: badhttpserver\r\n (no-py3 !)
-  write(37 from 37) -> (906) Date: $HTTP_DATE$\r\n (no-py3 !)
-  write(41 from 41) -> (865) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
-  write(21 from 21) -> (844) Content-Length: 450\r\n (no-py3 !)
-  write(2 from 2) -> (842) \r\n (no-py3 !)
-  write(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
+  sendall(160 from 160) -> (823) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
+  sendall(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
+  write(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
+  write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+  write(36 from 36) -> (947) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+  write(23 from 23) -> (924) Server: badhttpserver\r\n (no-py3 !)
+  write(37 from 37) -> (887) Date: $HTTP_DATE$\r\n (no-py3 !)
+  write(41 from 41) -> (846) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+  write(21 from 21) -> (825) Content-Length: 431\r\n (no-py3 !)
+  write(2 from 2) -> (823) \r\n (no-py3 !)
+  write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -702,7 +702,7 @@
   readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
-  readline(*) -> (461) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
+  readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
   readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
   readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
   readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -733,7 +733,7 @@
 
 Server sends partial bundle2 header magic
 
-  $ hg serve --config badserver.closeaftersendbytes=999 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=980 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -778,7 +778,7 @@
 
 Server sends incomplete bundle2 stream params length
 
-  $ hg serve --config badserver.closeaftersendbytes=1008 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=989 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -825,7 +825,7 @@
 
 Servers stops after bundle2 stream params header
 
-  $ hg serve --config badserver.closeaftersendbytes=1011 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=992 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -871,7 +871,7 @@
 
 Server stops sending after bundle2 part header length
 
-  $ hg serve --config badserver.closeaftersendbytes=1020 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1001 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -920,7 +920,7 @@
 
 Server stops sending after bundle2 part header
 
-  $ hg serve --config badserver.closeaftersendbytes=1067 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1048 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -973,7 +973,7 @@
 
 Server stops after bundle2 part payload chunk size
 
-  $ hg serve --config badserver.closeaftersendbytes=1088 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1069 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -1029,7 +1029,7 @@
 
 Server stops sending in middle of bundle2 payload chunk
 
-  $ hg serve --config badserver.closeaftersendbytes=1549 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1530 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -1086,7 +1086,7 @@
 
 Server stops sending after 0 length payload chunk size
 
-  $ hg serve --config badserver.closeaftersendbytes=1580 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1561 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -1148,8 +1148,7 @@
 Server stops sending after 0 part bundle part header (indicating end of bundle2 payload)
 This is before the 0 size chunked transfer part that signals end of HTTP response.
 
-#  $ hg serve --config badserver.closeaftersendbytes=1755 -p $HGPORT -d --pid-file=hg.pid -E error.log
-  $ hg serve --config badserver.closeaftersendbytes=1862 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1736 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -1165,25 +1164,20 @@
   $ killdaemons.py $DAEMON_PIDS
 
 #if py36
-  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -25
-  sendall(9 from 9) -> (851) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (842) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
-  sendall(47 from 47) -> (795) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
-  sendall(9 from 9) -> (786) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
-  sendall(473 from 473) -> (313) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (304) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (295) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
-  sendall(38 from 38) -> (257) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
-  sendall(9 from 9) -> (248) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
-  sendall(64 from 64) -> (184) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
-  sendall(9 from 9) -> (175) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (166) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
-  sendall(41 from 41) -> (125) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
-  sendall(9 from 9) -> (116) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (107) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
-  sendall(35 from 35) -> (72) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (63) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
-  sendall(45 from 45) -> (18) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -20
+  sendall(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  sendall(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  sendall(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  sendall(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  sendall(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  sendall(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  sendall(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  sendall(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  sendall(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
   sendall(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   write limit reached; closing socket
@@ -1193,25 +1187,20 @@
   
 
 #else
-  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -26
-  write(9 from 9) -> (851) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (842) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
-  write(47 from 47) -> (795) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
-  write(9 from 9) -> (786) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
-  write(473 from 473) -> (313) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (304) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (295) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
-  write(38 from 38) -> (257) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
-  write(9 from 9) -> (248) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
-  write(64 from 64) -> (184) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
-  write(9 from 9) -> (175) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (166) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
-  write(41 from 41) -> (125) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
-  write(9 from 9) -> (116) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (107) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
-  write(35 from 35) -> (72) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (63) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
-  write(45 from 45) -> (18) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
+  write(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  write(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  write(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  write(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  write(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
   write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   write limit reached; closing socket
@@ -1227,7 +1216,7 @@
 
 Server sends a size 0 chunked-transfer size without terminating \r\n
 
-  $ hg serve --config badserver.closeaftersendbytes=1865 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1739 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -1243,25 +1232,20 @@
   $ killdaemons.py $DAEMON_PIDS
 
 #if py36
-  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -26
-  sendall(9 from 9) -> (854) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (845) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
-  sendall(47 from 47) -> (798) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
-  sendall(9 from 9) -> (789) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
-  sendall(473 from 473) -> (316) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (307) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (298) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
-  sendall(38 from 38) -> (260) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
-  sendall(9 from 9) -> (251) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
-  sendall(64 from 64) -> (187) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
-  sendall(9 from 9) -> (178) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (169) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
-  sendall(41 from 41) -> (128) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
-  sendall(9 from 9) -> (119) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (110) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
-  sendall(35 from 35) -> (75) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
-  sendall(9 from 9) -> (66) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
-  sendall(45 from 45) -> (21) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
+  sendall(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  sendall(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  sendall(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  sendall(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  sendall(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  sendall(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  sendall(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  sendall(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  sendall(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  sendall(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
   sendall(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   sendall(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   sendall(3 from 5) -> (0) 0\r\n
@@ -1272,25 +1256,20 @@
   
 
 #else
-  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -27
-  write(9 from 9) -> (854) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (845) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
-  write(47 from 47) -> (798) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
-  write(9 from 9) -> (789) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
-  write(473 from 473) -> (316) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (307) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (298) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
-  write(38 from 38) -> (260) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
-  write(9 from 9) -> (251) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
-  write(64 from 64) -> (187) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
-  write(9 from 9) -> (178) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (169) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
-  write(41 from 41) -> (128) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
-  write(9 from 9) -> (119) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (110) 4\\r\\n\x00\x00\x00\x1d\\r\\n (esc)
-  write(35 from 35) -> (75) 1d\\r\\n\x16cache:rev-branch-cache\x00\x00\x00\x03\x00\x00\\r\\n (esc)
-  write(9 from 9) -> (66) 4\\r\\n\x00\x00\x00'\\r\\n (esc)
-  write(45 from 45) -> (21) 27\\r\\n\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00default\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\\r\\n (esc)
+  $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -22
+  write(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  write(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  write(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  write(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  write(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  write(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
   write(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   write(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
   write(3 from 5) -> (0) 0\r\n
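
The badserver hunks above all follow one pattern: `sendall(N from M) -> (R)` means the server sent N of the M requested bytes with R bytes of budget left, and every test's badserver.closeaftersendbytes budget is renumbered because the server no longer advertises rev-branch-cache. A minimal sketch of the technique, assuming a wrapper class of this shape (names invented; the real code lives in the test suite's badserver extension):

    class bytelimitedsocket(object):
        """Pass sends through until a byte budget is exhausted, then
        close the connection, truncating the final write if needed."""

        def __init__(self, sock, closeaftersendbytes):
            self._sock = sock
            self._remaining = closeaftersendbytes

        def sendall(self, data):
            if len(data) > self._remaining:
                data = data[:self._remaining]
            self._sock.sendall(data)
            self._remaining -= len(data)
            if self._remaining <= 0:
                # corresponds to the "write limit reached; closing socket"
                # lines in the expected output above
                self._sock.close()

The renumbering itself is plain arithmetic over this patch: most budgets shrink by the 19 bytes of '%0Arev-branch-cache' removed from the advertised capabilities, and the last two additionally lose the 107 bytes of rev-branch-cache part frames (9 + 35 + 9 + 45 + 9 in the deleted sendall lines) that are no longer streamed:

    >>> len('%0Arev-branch-cache')
    19
    >>> 999 - 980, 1020 - 1001
    (19, 19)
    >>> 19 + (9 + 35 + 9 + 45 + 9), 1862 - 1736, 1865 - 1739
    (126, 126, 126)
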
--- a/tests/test-http-protocol.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-http-protocol.t	Thu Mar 18 18:24:59 2021 -0400
@@ -321,7 +321,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
   s> setsockopt(6, 1, 1) -> None (?)
   s>     POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
@@ -437,7 +437,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 503\r\n
+  s>     Content-Length: 484\r\n
   s>     \r\n
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
@@ -474,7 +474,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 503\r\n
+  s>     Content-Length: 484\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
@@ -745,7 +745,7 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 503\r\n
+  s>     Content-Length: 484\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
   s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
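
The capability-string shrink shows up three ways in this file: the plain Content-Length drops from 503 to 484, the CBOR prefix changes from Y\x01\xf7 to Y\x01\xe4 (0x59 opens a byte string whose two-byte big-endian length follows), and in the doubly percent-encoded x-hgarg-1 header of the previous file the same removal costs 21 bytes. A quick consistency check:

    >>> 0x1f7, 0x1e4, 0x1f7 - 0x1e4
    (503, 484, 19)
    >>> len('%250Arev-branch-cache'), 461 - 440
    (21, 21)
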
--- a/tests/test-http.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-http.t	Thu Mar 18 18:24:59 2021 -0400
@@ -348,20 +348,20 @@
   list of changesets:
   7f4e523d01f2cc3765ac8934da3d14db775ff872
   bundle2-output-bundle: "HG20", 5 parts total
-  bundle2-output-part: "replycaps" 224 bytes payload
+  bundle2-output-part: "replycaps" 207 bytes payload
   bundle2-output-part: "check:phases" 24 bytes payload
   bundle2-output-part: "check:updated-heads" streamed payload
   bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
   bundle2-output-part: "phase-heads" 24 bytes payload
   sending unbundle command
-  sending 1040 bytes
+  sending 1023 bytes
   devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
-  devel-peer-request:   Content-length 1040
+  devel-peer-request:   Content-length 1023
   devel-peer-request:   Content-type application/mercurial-0.1
   devel-peer-request:   Vary X-HgArg-1,X-HgProto-1
   devel-peer-request:   X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
   devel-peer-request:   16 bytes of commands arguments in headers
-  devel-peer-request:   1040 bytes of data
+  devel-peer-request:   1023 bytes of data
   devel-peer-request:   finished in *.???? seconds (200) (glob)
   bundle2-input-bundle: no-transaction
   bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
@@ -382,6 +382,7 @@
   devel-peer-request:   16 bytes of commands arguments in headers
   devel-peer-request:   finished in *.???? seconds (200) (glob)
   received listkey for "phases": 15 bytes
+  (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
   $ hg rollback -q
 
   $ sed 's/.*] "/"/' < ../access.log
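
The unbundle accounting above shrinks consistently as well: the replycaps part, which carries the client's bundle2 capabilities, loses exactly the 16 bytes of 'rev-branch-cache' plus (presumably) one separator byte, and the total request size follows suit:

    >>> 224 - 207, 1040 - 1023
    (17, 17)
    >>> len('rev-branch-cache') + 1
    17
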
--- a/tests/test-inherit-mode.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-inherit-mode.t	Thu Mar 18 18:24:59 2021 -0400
@@ -134,6 +134,8 @@
   00660 ../push/.hg/00changelog.i
   00770 ../push/.hg/cache/
   00660 ../push/.hg/cache/branch2-base
+  00660 ../push/.hg/cache/rbc-names-v1
+  00660 ../push/.hg/cache/rbc-revs-v1
   00660 ../push/.hg/dirstate
   00660 ../push/.hg/requires
   00770 ../push/.hg/store/
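
The push target's cache directory gains two files: with the cache:rev-branch-cache bundle2 part gone (see the lfs-serve hunks further down), the receiving repository computes the rev-branch cache itself, so the mode-inheritance test must now list them. A sketch of one rbc-revs-v1 record, assuming the '>4sI' layout used by mercurial/branchmap.py (four node-prefix bytes plus a branch index, eight bytes per revision):

    import struct

    # node prefix of 96ee1d7354c4... with branch index 0 -> one record
    rec = struct.pack('>4sI', bytes.fromhex('96ee1d73'), 0)
    assert len(rec) == 8
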
--- a/tests/test-install.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-install.t	Thu Mar 18 18:24:59 2021 -0400
@@ -184,7 +184,7 @@
   $ cd $TESTTMP
   $ unset PYTHONPATH
 
-#if py3 ensurepip
+#if py3 ensurepip network-io
   $ "$PYTHON" -m venv installenv >> pip.log
 
 Hack: Debian does something a bit different in ensurepip.bootstrap. This makes
@@ -197,8 +197,10 @@
 
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
-  $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
+  $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
     Failed building wheel for mercurial (?)
+  WARNING: You are using pip version *; however, version * is available. (glob) (?)
+  You should consider upgrading via the '$TESTTMP/installenv/bin/python* -m pip install --upgrade pip' command. (glob) (?)
   $ ./installenv/*/hg debuginstall || cat pip.log
   checking encoding (ascii)...
   checking Python executable (*) (glob)
@@ -222,17 +224,17 @@
   no problems detected
 #endif
 
-#if virtualenv no-py3
+#if virtualenv no-py3 network-io
 
 Note: --no-site-packages is the default for all versions enabled by hghave
 
-  $ "$PYTHON" -m virtualenv --never-download installenv >> pip.log
+  $ "$PYTHON" -m virtualenv installenv >> pip.log
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
 
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
-  $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
+  $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
   DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
   DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. (?)
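
Both install tests are now additionally gated on a network-io requirement, since dropping --no-index allows pip to contact a package index. A hypothetical sketch of such a guard in hghave's style (the @check decorator is the real hghave mechanism; the HGTESTS_ALLOW_NETIO variable is invented here for illustration):

    import os

    # assumes hghave's check() decorator is in scope, as in tests/hghave.py
    @check("network-io", "whether tests are allowed network access")
    def has_network_io():
        # hypothetical opt-in switch; the real check may differ
        return os.environ.get("HGTESTS_ALLOW_NETIO") == "1"
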
--- a/tests/test-largefiles.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-largefiles.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1751,7 +1751,7 @@
   $ hg rm sub2/large6
   $ hg up -r.
   abort: outstanding uncommitted merge
-  [255]
+  [20]
 
 - revert should be able to revert files introduced in a pending merge
   $ hg revert --all -r .
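
This is the first of many [255] updates in this section: with detailed exit codes enabled, state-related aborts (an outstanding uncommitted merge, untracked files blocking a merge) exit with 20 and failing hooks with 40, rather than the generic 255. A minimal sketch of the pattern, loosely modeled on mercurial/error.py (attribute names assumed):

    class Abort(Exception):
        """Generic abort; historically always exited 255."""
        exit_code = 255

    class StateError(Abort):
        """Wrong repository state, e.g. "outstanding uncommitted
        merge" -> [20] in the hunks above and below."""
        exit_code = 20

    class HookAbort(Abort):
        """A hook refused the operation, e.g. "pretxncommit hook
        exited with status 1" -> [40]."""
        exit_code = 40
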
--- a/tests/test-lfs-serve-access.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-lfs-serve-access.t	Thu Mar 18 18:24:59 2021 -0400
@@ -66,7 +66,7 @@
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
 
   $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
@@ -110,9 +110,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
-  bundle2-input-part: total payload size 39
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 3 parts total
   checking for updated bookmarks
   updating the branch cache
   added 1 changesets with 1 changes to 1 files
@@ -167,7 +165,7 @@
   $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
 
@@ -313,7 +311,7 @@
   $ cat $TESTTMP/access.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -332,7 +330,7 @@
   $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
@@ -483,7 +481,7 @@
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Acheckheads%253Drelated%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
   $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
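
The "4 parts total" -> "3 parts total" hunk is the receiving side of the rev-branch-cache removal: the server no longer emits the advisory cache:rev-branch-cache part. The deleted sendall lines in the badserver dumps frame that part exactly, under the usual bundle2 header layout (one length byte, the part name, a four-byte part id and two zero parameter counts):

    >>> len('cache:rev-branch-cache'), 0x16
    (22, 22)
    >>> 1 + 22 + 6, 0x1d
    (29, 29)
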
--- a/tests/test-lfs-serve.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-lfs-serve.t	Thu Mar 18 18:24:59 2021 -0400
@@ -462,6 +462,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
+  (sent 8 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
   $ grep 'lfs' .hg/requires $SERVER_REQUIRES
   .hg/requires:lfs
   $TESTTMP/server/.hg/requires:lfs
--- a/tests/test-log.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-log.t	Thu Mar 18 18:24:59 2021 -0400
@@ -2001,6 +2001,26 @@
   @@ -0,0 +1,1 @@
   +b
   
+
+Test that diff.merge is respected (file b was added on one side and
+therefore merged cleanly)
+
+  $ hg log -pr 3 --config diff.merge=yes
+  changeset:   3:8e07aafe1edc
+  tag:         tip
+  parent:      2:b09be438c43a
+  parent:      1:925d80f479bb
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     3
+  
+  diff -r 8e07aafe1edc a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,1 @@
+  -b
+  +c
+  
   $ cd ..
 
 'hg log -r rev fn' when last(filelog(fn)) != rev
--- a/tests/test-mactext.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-mactext.t	Thu Mar 18 18:24:59 2021 -0400
@@ -27,7 +27,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.cr hook failed
-  [255]
+  [40]
   $ hg cat f | f --hexdump
   
   0000: 68 65 6c 6c 6f 0a                               |hello.|
--- a/tests/test-manifest.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-manifest.py	Thu Mar 18 18:24:59 2021 -0400
@@ -6,6 +6,8 @@
 import unittest
 import zlib
 
+from mercurial.node import sha1nodeconstants
+
 from mercurial import (
     manifest as manifestmod,
     match as matchmod,
@@ -436,7 +438,7 @@
 
 class testtreemanifest(unittest.TestCase, basemanifesttests):
     def parsemanifest(self, text):
-        return manifestmod.treemanifest(b'', text)
+        return manifestmod.treemanifest(sha1nodeconstants, b'', text)
 
     def testWalkSubtrees(self):
         m = self.parsemanifest(A_DEEPER_MANIFEST)
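
treemanifest now takes the node constants as its first argument, presumably so manifests are not hard-wired to SHA-1 node lengths. A minimal usage sketch matching the new call above:

    from mercurial.node import sha1nodeconstants
    from mercurial import manifest as manifestmod

    # an empty tree manifest rooted at b'', using SHA-1 node constants
    m = manifestmod.treemanifest(sha1nodeconstants, b'', b'')
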
--- a/tests/test-merge-remove.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-merge-remove.t	Thu Mar 18 18:24:59 2021 -0400
@@ -95,7 +95,7 @@
   $ hg merge
   bar: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ cat bar
   memories of buried pirate treasure
 
--- a/tests/test-merge-subrepos.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-merge-subrepos.t	Thu Mar 18 18:24:59 2021 -0400
@@ -117,10 +117,17 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 XXX: There's a difference between wdir() and '.', so there should be a status.
-`hg files -S` from the top is also missing 'subrepo/b'.
+`hg files -S` from the top is also missing 'subrepo/b'. The file should be
+seen as deleted (and maybe even missing? in which case `hg files` should
+list it)
 
   $ hg st -S
+  R subrepo/b (missing-correct-output !)
   $ hg st -R subrepo
+  R subrepo/b (missing-correct-output !)
+
+(note: returns [1] because no files "match" since the list is empty)
+
   $ hg files -R subrepo
   [1]
   $ hg files -R subrepo -r '.'
--- a/tests/test-merge-tools.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-merge-tools.t	Thu Mar 18 18:24:59 2021 -0400
@@ -377,7 +377,7 @@
   merging f
   some fail message
   abort: $TESTTMP/mybrokenmerge.py hook failed
-  [255]
+  [40]
   $ aftermerge
   # cat f
   revision 1
--- a/tests/test-merge1.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-merge1.t	Thu Mar 18 18:24:59 2021 -0400
@@ -113,7 +113,7 @@
   $ hg merge 1
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
 
 #if symlink
 symlinks to directories should be treated as regular files (issue5027)
@@ -122,7 +122,7 @@
   $ hg merge 1
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
 symlinks shouldn't be followed
   $ rm b
   $ echo This is file b1 > .hg/b
@@ -130,7 +130,7 @@
   $ hg merge 1
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
 
   $ rm b
   $ echo This is file b2 > b
@@ -144,7 +144,7 @@
   $ hg merge 1 --config merge.checkunknown=abort
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
 
 this merge should warn
   $ hg merge 1 --config merge.checkunknown=warn
@@ -188,7 +188,7 @@
   $ hg merge 3 --config merge.checkignored=ignore --config merge.checkunknown=abort
   remoteignored: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
   merging .hgignore
   merging for .hgignore
@@ -210,15 +210,15 @@
   b: untracked file differs
   localignored: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg merge 3 --config merge.checkignored=abort --config merge.checkunknown=ignore
   localignored: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=abort
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg merge 3 --config merge.checkignored=warn --config merge.checkunknown=warn
   b: replacing untracked file
   localignored: replacing untracked file
--- a/tests/test-minirst.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-minirst.py	Thu Mar 18 18:24:59 2021 -0400
@@ -159,6 +159,8 @@
 :a: First item.
 :ab: Second item. Indentation and wrapping
      is handled automatically.
+:c\:d: a key with colon
+:efg\:\:hh: a key with many colons
 
 Next list:
 
--- a/tests/test-minirst.py.out	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-minirst.py.out	Thu Mar 18 18:24:59 2021 -0400
@@ -439,6 +439,8 @@
 a             First item.
 ab            Second item. Indentation and wrapping is
               handled automatically.
+c:d           a key with colon
+efg::hh       a key with many colons
 
 Next list:
 
@@ -456,6 +458,9 @@
               wrapping is
               handled
               automatically.
+c:d           a key with colon
+efg::hh       a key with many
+              colons
 
 Next list:
 
@@ -476,6 +481,10 @@
  <dd>First item.
  <dt>ab
  <dd>Second item. Indentation and wrapping is handled automatically.
+ <dt>c:d
+ <dd>a key with colon
+ <dt>efg::hh
+ <dd>a key with many colons
 </dl>
 <p>
 Next list:
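
The new field-list cases exercise backslash-escaped colons in field names: ':c\:d:' renders as the key c:d instead of being split at the inner colon, as the three output hunks above show for the plain, narrow and HTML styles. A small illustrative call (minirst.format is the real entry point; the width is arbitrary):

    from mercurial import minirst

    text = b':c\\:d: a key with colon\n'
    formatted = minirst.format(text, 70)
    # the rendered key is c:d -- the escape keeps the inner colon
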
--- a/tests/test-mq-qfold.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-mq-qfold.t	Thu Mar 18 18:24:59 2021 -0400
@@ -235,7 +235,7 @@
   rollback completed
   qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   original message
   
--- a/tests/test-mq-qnew.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-mq-qnew.t	Thu Mar 18 18:24:59 2021 -0400
@@ -310,7 +310,7 @@
   note: commit message saved in .hg/last-message.txt
   note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   
   
--- a/tests/test-mq-qrefresh-replace-log-message.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-mq-qrefresh-replace-log-message.t	Thu Mar 18 18:24:59 2021 -0400
@@ -191,7 +191,7 @@
   rollback completed
   qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   Fifth commit message
    This is the 5th log message
@@ -235,7 +235,7 @@
   rollback completed
   qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
 
 (rebuilding at failure of qrefresh bases on rev #0, and it causes
 dropping status of "file2")
@@ -273,7 +273,7 @@
   rollback completed
   qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
 
   $ sh "$TESTTMP/checkvisibility.sh"
   ====
@@ -315,7 +315,7 @@
   rollback completed
   qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
 
   $ sh "$TESTTMP/checkvisibility.sh"
   ====
--- a/tests/test-narrow-exchange.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow-exchange.t	Thu Mar 18 18:24:59 2021 -0400
@@ -105,7 +105,7 @@
   remote: adding file changes
   remote: transaction abort!
   remote: rollback completed
-  remote: abort: data/inside2/f.i@4a1aa07735e6: unknown parent (reporevlogstore !)
+  remote: abort: data/inside2/f.i@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
   remote: abort: data/inside2/f/index@4a1aa07735e6: no node (reposimplestore !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4)
   [255]
@@ -218,8 +218,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
-  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
+  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   remote: transaction abort! (lfs-on !)
   remote: rollback completed (lfs-on !)
-  remote: abort: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
+  remote: abort: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-narrow-pull.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow-pull.t	Thu Mar 18 18:24:59 2021 -0400
@@ -78,7 +78,7 @@
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.bad hook exited with status 1
-  [255]
+  [40]
   $ hg id
   223311e70a6f tip
 
@@ -147,6 +147,7 @@
   $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
   $ cd narrow2
   $ hg pull -q -r 1
+  remote: abort: unexpected error: unable to resolve parent while packing '00manifest.i' 1 for changeset 0
   transaction abort!
   rollback completed
   abort: pull failed on remote
--- a/tests/test-narrow-shallow-merges.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow-shallow-merges.t	Thu Mar 18 18:24:59 2021 -0400
@@ -179,7 +179,7 @@
   
 
   $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
-  ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
+  ...2a20009de83e 3ac1f5779de3 000000000000 outside 10
   ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
   ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
   ...b844052e7b3b 000000000000 000000000000 outside 2c
--- a/tests/test-narrow-trackedcmd.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow-trackedcmd.t	Thu Mar 18 18:24:59 2021 -0400
@@ -110,6 +110,8 @@
       --clear                      whether to replace the existing narrowspec
       --force-delete-local-changes forces deletion of local changes when
                                    narrowing
+      --[no-]backup                back up local changes when narrowing
+                                   (default: on)
       --update-working-copy        update working copy when the store has
                                    changed
    -e --ssh CMD                    specify ssh command to use
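
`hg tracked` grew a --[no-]backup pair that defaults to on. A minimal
argparse sketch of the same on/off flag semantics (a hypothetical stand-in,
not the real command table):

    import argparse

    # backup defaults to on; --no-backup disables the strip-backup bundle
    # that narrowing would otherwise write.
    parser = argparse.ArgumentParser(prog='hg tracked')
    parser.add_argument('--backup', dest='backup', action='store_true',
                        default=True,
                        help='back up local changes when narrowing')
    parser.add_argument('--no-backup', dest='backup', action='store_false')

    assert parser.parse_args([]).backup is True
    assert parser.parse_args(['--no-backup']).backup is False
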
--- a/tests/test-narrow-widen.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow-widen.t	Thu Mar 18 18:24:59 2021 -0400
@@ -431,7 +431,7 @@
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.bad hook exited with status 1
-  [255]
+  [40]
   $ hg l
   $ hg bookmarks
   no bookmarks set
--- a/tests/test-narrow.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-narrow.t	Thu Mar 18 18:24:59 2021 -0400
@@ -492,3 +492,33 @@
   searching for changes
   looking for unused includes to remove
   found no unused includes
+Test --no-backup
+  $ hg tracked --addinclude d0 --addinclude d2 -q
+  $ hg unbundle .hg/strip-backup/*-narrow.hg -q
+  $ rm .hg/strip-backup/*
+  $ hg tracked --auto-remove-includes --no-backup
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for unused includes to remove
+  path:d0
+  path:d2
+  remove these unused includes (yn)? y
+  looking for local changes to affected paths
+  deleting data/d0/f.i
+  deleting data/d2/f.i
+  deleting meta/d0/00manifest.i (tree !)
+  deleting meta/d2/00manifest.i (tree !)
+  $ ls .hg/strip-backup/
+
+
+Test removing an include while concurrently modifying a file in that path
+  $ hg clone --narrow ssh://user@dummy/master narrow-concurrent-modify -q \
+  > --include d0 --include d1
+  $ cd narrow-concurrent-modify
+  $ hg --config 'hooks.pretxnopen = echo modified >> d0/f' tracked --removeinclude d0
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for local changes to affected paths
+  deleting data/d0/f.i
+  deleting meta/d0/00manifest.i (tree !)
+  not deleting possibly dirty file d0/f
--- a/tests/test-obsolete-changeset-exchange.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-obsolete-changeset-exchange.t	Thu Mar 18 18:24:59 2021 -0400
@@ -158,11 +158,10 @@
   list of changesets:
   bec0734cd68e84477ba7fc1d13e6cff53ab70129
   listing keys for "bookmarks"
-  bundle2-output-bundle: "HG20", 4 parts total
+  bundle2-output-bundle: "HG20", 3 parts total
   bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
   bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
   bundle2-output-part: "phase-heads" 24 bytes payload
-  bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
   adding changesets
@@ -174,9 +173,7 @@
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
-  bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
-  bundle2-input-part: total payload size 39
-  bundle2-input-bundle: 4 parts total
+  bundle2-input-bundle: 3 parts total
   checking for updated bookmarks
   updating the branch cache
   added 1 changesets with 1 changes to 1 files (+1 heads)
--- a/tests/test-obsolete-distributed.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-obsolete-distributed.t	Thu Mar 18 18:24:59 2021 -0400
@@ -151,12 +151,11 @@
   list of changesets:
   391a2bf12b1b8b05a72400ae36b26d50a091dc22
   listing keys for "bookmarks"
-  bundle2-output-bundle: "HG20", 5 parts total
+  bundle2-output-bundle: "HG20", 4 parts total
   bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
   bundle2-output-part: "listkeys" (params: 1 mandatory) empty payload
   bundle2-output-part: "obsmarkers" streamed payload
   bundle2-output-part: "phase-heads" 48 bytes payload
-  bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
   adding changesets
@@ -170,9 +169,7 @@
   bundle2-input-part: total payload size 143
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 48
-  bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
-  bundle2-input-part: total payload size 39
-  bundle2-input-bundle: 5 parts total
+  bundle2-input-bundle: 4 parts total
   checking for updated bookmarks
   adding 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
--- a/tests/test-parse-date.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-parse-date.t	Thu Mar 18 18:24:59 2021 -0400
@@ -103,43 +103,43 @@
 
   $ hg log -d "--2"
   abort: -2 must be nonnegative (see 'hg help dates')
-  [255]
+  [10]
 
 Whitespace only
 
   $ hg log -d " "
   abort: dates cannot consist entirely of whitespace
-  [255]
+  [10]
 
 Test date formats with '>' or '<' accompanied by space characters
 
   $ hg log -d '>' --template '{date|date}\n'
   abort: invalid day spec, use '>DATE'
-  [255]
+  [10]
   $ hg log -d '<' --template '{date|date}\n'
   abort: invalid day spec, use '<DATE'
-  [255]
+  [10]
 
   $ hg log -d ' >' --template '{date|date}\n'
   abort: invalid day spec, use '>DATE'
-  [255]
+  [10]
   $ hg log -d ' <' --template '{date|date}\n'
   abort: invalid day spec, use '<DATE'
-  [255]
+  [10]
 
   $ hg log -d '> ' --template '{date|date}\n'
   abort: invalid day spec, use '>DATE'
-  [255]
+  [10]
   $ hg log -d '< ' --template '{date|date}\n'
   abort: invalid day spec, use '<DATE'
-  [255]
+  [10]
 
   $ hg log -d ' > ' --template '{date|date}\n'
   abort: invalid day spec, use '>DATE'
-  [255]
+  [10]
   $ hg log -d ' < ' --template '{date|date}\n'
   abort: invalid day spec, use '<DATE'
-  [255]
+  [10]
 
   $ hg log -d '>02/01' --template '{date|date}\n'
   $ hg log -d '<02/01' --template '{date|date}\n'
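
These [255] -> [10] changes, and the [20], [40], and [250] ones elsewhere in
this series, come from the test suite enabling Mercurial's detailed exit
codes: aborts are now classified by kind instead of all collapsing to 255. A
simplified sketch of the classification (class names mirror
mercurial/error.py, but this mapping is a stand-in, not the real dispatch
code):

    class InputError(Exception):     # bad arguments, revsets, dates  -> 10
        exit_code = 10

    class StateError(Exception):     # repo state forbids the action  -> 20
        exit_code = 20

    class HookAbort(Exception):      # a hook exited non-zero         -> 40
        exit_code = 40

    class StorageError(Exception):   # corrupt/unsupported storage    -> 50
        exit_code = 50

    class CanceledError(Exception):  # user declined at a prompt      -> 250
        exit_code = 250

    def exitstatus(exc, detailed=True):
        """Map a raised error to the process exit status."""
        if detailed:
            return getattr(exc, 'exit_code', 255)
        return 255  # the historical catch-all

    assert exitstatus(HookAbort('pretxncommit hook exited with status 1')) == 40
    assert exitstatus(InputError('-2 must be nonnegative')) == 10
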
--- a/tests/test-parseindex2.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-parseindex2.py	Thu Mar 18 18:24:59 2021 -0400
@@ -117,8 +117,8 @@
 )
 
 
-def parse_index2(data, inline):
-    index, chunkcache = parsers.parse_index2(data, inline)
+def parse_index2(data, inline, revlogv2=False):
+    index, chunkcache = parsers.parse_index2(data, inline, revlogv2=revlogv2)
     return list(index), chunkcache
 
 
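
The helper above gains a defaulted revlogv2 keyword so that existing
two-argument call sites keep working while revlog-v2 tests opt in explicitly.
The calling convention, with a pure-Python stand-in for the C extension:

    # Stand-in for parsers.parse_index2; only the signature evolution is
    # the point here, not the parsing itself.
    def parse_index2(data, inline, revlogv2=False):
        return 'revlogv2' if revlogv2 else 'revlogv1'

    assert parse_index2(b'', True) == 'revlogv1'               # old call sites
    assert parse_index2(b'', True, revlogv2=True) == 'revlogv2'
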
--- a/tests/test-pathconflicts-basic.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-pathconflicts-basic.t	Thu Mar 18 18:24:59 2021 -0400
@@ -53,7 +53,7 @@
   $ hg up file
   a: untracked directory conflicts with file
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg up --clean file
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (activating bookmark file)
--- a/tests/test-pathconflicts-update.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-pathconflicts-update.t	Thu Mar 18 18:24:59 2021 -0400
@@ -49,7 +49,7 @@
   $ hg up dir
   a/b: untracked file conflicts with directory
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg up dir --config merge.checkunknown=warn
   a/b: replacing untracked file
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -70,7 +70,7 @@
   $ hg up dir
   a/b: untracked file conflicts with directory
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg up dir --config merge.checkunknown=warn
   a/b: replacing untracked file
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -89,7 +89,7 @@
   $ hg up file
   a/b: untracked directory conflicts with file
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg up file --config merge.checkunknown=warn
   a/b: replacing untracked files in directory
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -107,7 +107,7 @@
   $ hg up link
   a/b: untracked directory conflicts with file
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg up link --config merge.checkunknown=warn
   a/b: replacing untracked files in directory
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-persistent-nodemap.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-persistent-nodemap.t	Thu Mar 18 18:24:59 2021 -0400
@@ -56,9 +56,9 @@
   generaldelta:       yes
   share-safe:          no
   sparserevlog:       yes
-  sidedata:            no
   persistent-nodemap: yes
   copies-sdc:          no
+  revlog-v2:           no
   plain-cl-delta:     yes
   compression:        zlib
   compression-level:  default
@@ -575,13 +575,13 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap: yes     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
-  $ hg debugupgraderepo --run --no-backup --quiet
+  $ hg debugupgraderepo --run --no-backup
   upgrade will perform the following actions:
   
   requirements
@@ -593,8 +593,17 @@
     - changelog
     - manifest
   
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  downgrading repository to not use persistent nodemap feature
+  removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
   $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
-  [1]
+  00changelog-*.nd (glob)
+  00manifest-*.nd (glob)
+  undo.backup.00changelog.n
+  undo.backup.00manifest.n
   $ hg debugnodemap --metadata
 
 
@@ -611,29 +620,40 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no    yes      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
-  $ hg debugupgraderepo --run --no-backup --quiet
+  $ hg debugupgraderepo --run --no-backup
   upgrade will perform the following actions:
   
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
      added: persistent-nodemap
   
+  persistent-nodemap
+     Speedup revision lookup by node id.
+  
   processed revlogs:
     - all-filelogs
     - changelog
     - manifest
   
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  upgrading repository to use persistent nodemap feature
+  removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
   $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
   00changelog-*.nd (glob)
   00changelog.n
   00manifest-*.nd (glob)
   00manifest.n
+  undo.backup.00changelog.n
+  undo.backup.00manifest.n
 
   $ hg debugnodemap --metadata
   uid: * (glob)
--- a/tests/test-phases.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-phases.t	Thu Mar 18 18:24:59 2021 -0400
@@ -757,7 +757,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
 
 (check (in)visibility of phaseroot while transaction running in repo)
@@ -780,7 +780,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
 
 Check that pretxnclose-phase hook can control phase movement
 
@@ -854,12 +854,12 @@
   transaction abort!
   rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
-  [255]
+  [40]
   $ hg phase --public a603bfb5a83e
   transaction abort!
   rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
-  [255]
+  [40]
   $ hg phase --draft 17a481b3bccb
   test-debug-phase: move rev 3: 2 -> 1
   test-debug-phase: move rev 4: 2 -> 1
@@ -871,7 +871,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
-  [255]
+  [40]
 
   $ cd ..
 
--- a/tests/test-pull-bundle.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-pull-bundle.t	Thu Mar 18 18:24:59 2021 -0400
@@ -185,7 +185,7 @@
   adding changesets
   adding manifests
   adding file changes
-  abort: 00changelog.i@66f7d451a68b: no node
+  abort: 00changelog.i@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
   [50]
   $ cd ..
   $ killdaemons.py
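
Several storage-error messages (here and in the narrow tests above) now print
the full 40-digit hex node instead of the 12-digit short form. Standalone
equivalents of the two spellings, assuming 20-byte binary nodes as in
revlogv1:

    import binascii

    def hexnode(node):    # 20-byte binary node -> 40 hex digits
        return binascii.hexlify(node).decode('ascii')

    def shortnode(node):  # the historical 12-digit abbreviation
        return hexnode(node)[:12]

    node = binascii.unhexlify('66f7d451a68b85ed82ff5fcc254daf50c74144bd')
    assert shortnode(node) == '66f7d451a68b'
    assert len(hexnode(node)) == 40
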
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-pull-network.t	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,137 @@
+#require serve
+
+#testcases sshv1 sshv2
+
+#if sshv2
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > sshpeer.advertise-v2 = true
+  > sshserver.support-v2 = true
+  > EOF
+#endif
+
+  $ hg init test
+  $ cd test
+
+  $ echo foo>foo
+  $ hg addremove
+  adding foo
+  $ hg commit -m 1
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 1 changesets with 1 changes to 1 files
+
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid
+  $ cat hg.pid >> $DAEMON_PIDS
+  $ cd ..
+
+  $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 340e38bdcde4
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cd copy
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checked 1 changesets with 1 changes to 1 files
+
+  $ hg co
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat foo
+  foo
+
+  $ hg manifest --debug
+  2ed2a3912a0b24502043eae84ee4b279c18b90dd 644   foo
+
+  $ hg pull
+  pulling from http://foo@localhost:$HGPORT/
+  searching for changes
+  no changes found
+
+  $ hg rollback --dry-run --verbose
+  repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
+
+Test pull of a non-existing 20-character revision specification, making sure
+plain ASCII identifiers are not encoded like a node:
+
+  $ hg pull -r 'xxxxxxxxxxxxxxxxxxxy'
+  pulling from http://foo@localhost:$HGPORT/
+  abort: unknown revision 'xxxxxxxxxxxxxxxxxxxy'
+  [255]
+  $ hg pull -r 'xxxxxxxxxxxxxxxxxx y'
+  pulling from http://foo@localhost:$HGPORT/
+  abort: unknown revision 'xxxxxxxxxxxxxxxxxx y'
+  [255]
+
+Test pull of working copy revision
+  $ hg pull -r 'ffffffffffff'
+  pulling from http://foo@localhost:$HGPORT/
+  abort: unknown revision 'ffffffffffff'
+  [255]
+
+Test 'file:' uri handling:
+
+  $ hg pull -q file://../test-does-not-exist
+  abort: file:// URLs can only refer to localhost
+  [255]
+
+  $ hg pull -q file://../test
+  abort: file:// URLs can only refer to localhost
+  [255]
+
+MSYS changes 'file:' into 'file;'
+
+#if no-msys
+  $ hg pull -q file:../test  # no-msys
+#endif
+
+It's tricky to make file:// URLs work on every platform with
+regular shell commands.
+
+  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+  $ hg pull -q "$URL"
+  abort: file:// URLs can only refer to localhost
+  [255]
+
+  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+  $ hg pull -q "$URL"
+
+SEC: check for unsafe ssh url
+
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > ssh = sh -c "read l; read l; read l"
+  > EOF
+
+  $ hg pull 'ssh://-oProxyCommand=touch${IFS}owned/path'
+  pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
+  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
+  [255]
+  $ hg pull 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
+  pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
+  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
+  [255]
+  $ hg pull 'ssh://fakehost|touch${IFS}owned/path'
+  pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
+  abort: no suitable response from remote hg
+  [255]
+  $ hg --config ui.timestamp-output=true pull 'ssh://fakehost%7Ctouch%20owned/path'
+  \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] pulling from ssh://fakehost%7Ctouch%20owned/path (re)
+  \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] abort: no suitable response from remote hg (re)
+  [255]
+
+  $ [ ! -f owned ] || echo 'you got owned'
+
+  $ cd ..
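
The SEC block above guards against ssh(1) option injection: a host beginning
with '-' could be parsed by ssh as an option such as -oProxyCommand.
Mercurial's real check (util.checksafessh) is more thorough; a simplified
sketch of the idea:

    def checksafessh(url):
        """Reject ssh:// URLs whose host could be parsed as an ssh option."""
        path = url[len('ssh://'):] if url.startswith('ssh://') else url
        host = path.split('/', 1)[0]
        if host.startswith('-'):
            raise ValueError('potentially unsafe url: %r' % url)

    try:
        checksafessh('ssh://-oProxyCommand=touch${IFS}owned/path')
    except ValueError:
        pass
    else:
        raise AssertionError('unsafe url accepted')
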
--- a/tests/test-pull-update.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-pull-update.t	Thu Mar 18 18:24:59 2021 -0400
@@ -246,3 +246,25 @@
      active-before-pull        3:483b76ad4309
 
   $ cd ..
+
+Issue622: hg init && hg pull -u URL doesn't checkout default branch
+
+  $ hg init test
+  $ cd test
+  $ echo foo>foo
+  $ hg addremove
+  adding foo
+  $ hg commit -m 1
+  $ cd ..
+
+  $ hg init empty
+  $ cd empty
+  $ hg pull -u ../test
+  pulling from ../test
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 340e38bdcde4
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pull.t	Sat Mar 13 02:09:23 2021 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,152 +0,0 @@
-#require serve
-
-#testcases sshv1 sshv2
-
-#if sshv2
-  $ cat >> $HGRCPATH << EOF
-  > [experimental]
-  > sshpeer.advertise-v2 = true
-  > sshserver.support-v2 = true
-  > EOF
-#endif
-
-  $ hg init test
-  $ cd test
-
-  $ echo foo>foo
-  $ hg addremove
-  adding foo
-  $ hg commit -m 1
-
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
-
-  $ hg serve -p $HGPORT -d --pid-file=hg.pid
-  $ cat hg.pid >> $DAEMON_PIDS
-  $ cd ..
-
-  $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 340e38bdcde4
-  updating to branch default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-  $ cd copy
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
-
-  $ hg co
-  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat foo
-  foo
-
-  $ hg manifest --debug
-  2ed2a3912a0b24502043eae84ee4b279c18b90dd 644   foo
-
-  $ hg pull
-  pulling from http://foo@localhost:$HGPORT/
-  searching for changes
-  no changes found
-
-  $ hg rollback --dry-run --verbose
-  repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
-
-Test pull of non-existing 20 character revision specification, making sure plain ascii identifiers
-not are encoded like a node:
-
-  $ hg pull -r 'xxxxxxxxxxxxxxxxxxxy'
-  pulling from http://foo@localhost:$HGPORT/
-  abort: unknown revision 'xxxxxxxxxxxxxxxxxxxy'
-  [255]
-  $ hg pull -r 'xxxxxxxxxxxxxxxxxx y'
-  pulling from http://foo@localhost:$HGPORT/
-  abort: unknown revision 'xxxxxxxxxxxxxxxxxx y'
-  [255]
-
-Test pull of working copy revision
-  $ hg pull -r 'ffffffffffff'
-  pulling from http://foo@localhost:$HGPORT/
-  abort: unknown revision 'ffffffffffff'
-  [255]
-
-Issue622: hg init && hg pull -u URL doesn't checkout default branch
-
-  $ cd ..
-  $ hg init empty
-  $ cd empty
-  $ hg pull -u ../test
-  pulling from ../test
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 340e38bdcde4
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test 'file:' uri handling:
-
-  $ hg pull -q file://../test-does-not-exist
-  abort: file:// URLs can only refer to localhost
-  [255]
-
-  $ hg pull -q file://../test
-  abort: file:// URLs can only refer to localhost
-  [255]
-
-MSYS changes 'file:' into 'file;'
-
-#if no-msys
-  $ hg pull -q file:../test  # no-msys
-#endif
-
-It's tricky to make file:// URLs working on every platform with
-regular shell commands.
-
-  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
-  $ hg pull -q "$URL"
-  abort: file:// URLs can only refer to localhost
-  [255]
-
-  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
-  $ hg pull -q "$URL"
-
-SEC: check for unsafe ssh url
-
-  $ cat >> $HGRCPATH << EOF
-  > [ui]
-  > ssh = sh -c "read l; read l; read l"
-  > EOF
-
-  $ hg pull 'ssh://-oProxyCommand=touch${IFS}owned/path'
-  pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
-  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
-  [255]
-  $ hg pull 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
-  pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
-  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
-  [255]
-  $ hg pull 'ssh://fakehost|touch${IFS}owned/path'
-  pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
-  abort: no suitable response from remote hg
-  [255]
-  $ hg --config ui.timestamp-output=true pull 'ssh://fakehost%7Ctouch%20owned/path'
-  \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] pulling from ssh://fakehost%7Ctouch%20owned/path (re)
-  \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] abort: no suitable response from remote hg (re)
-  [255]
-
-  $ [ ! -f owned ] || echo 'you got owned'
-
-  $ cd ..
--- a/tests/test-purge.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-purge.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1,8 +1,3 @@
-  $ cat <<EOF >> $HGRCPATH
-  > [extensions]
-  > purge =
-  > EOF
-
 init
 
   $ hg init t
@@ -18,11 +13,35 @@
   $ echo 'ignored' > .hgignore
   $ hg ci -qAmr3 -d'2 0'
 
+purge without the extension
+
+  $ hg st
+  $ touch foo
+  $ hg purge
+  permanently delete 1 unknown files? (yN) n
+  abort: removal cancelled
+  [250]
+  $ hg st
+  ? foo
+  $ hg purge --no-confirm
+  $ hg st
+
+now enabling the extension
+
+  $ cat <<EOF >> $HGRCPATH
+  > [extensions]
+  > purge =
+  > EOF
+
 delete an empty directory
 
   $ mkdir empty_dir
   $ hg purge -p -v
   empty_dir
+  $ hg purge --confirm
+  permanently delete at least 1 empty directories? (yN) n
+  abort: removal cancelled
+  [250]
   $ hg purge -v
   removing directory empty_dir
   $ ls -A
@@ -62,6 +81,10 @@
   $ hg purge -p
   untracked_file
   untracked_file_readonly
+  $ hg purge --confirm
+  permanently delete 2 unknown files? (yN) n
+  abort: removal cancelled
+  [250]
   $ hg purge -v
   removing file untracked_file
   removing file untracked_file_readonly
@@ -121,6 +144,10 @@
   $ cd directory
   $ hg purge -p ../untracked_directory
   untracked_directory/nested_directory
+  $ hg purge --confirm
+  permanently delete 1 unknown files? (yN) n
+  abort: removal cancelled
+  [250]
   $ hg purge -v ../untracked_directory
   removing directory untracked_directory/nested_directory
   removing directory untracked_directory
@@ -138,6 +165,7 @@
 
   $ touch ignored
   $ hg purge -p
+  $ hg purge --confirm
   $ hg purge -v
   $ touch untracked_file
   $ ls
@@ -147,6 +175,10 @@
   untracked_file
   $ hg purge -p -i
   ignored
+  $ hg purge --confirm -i
+  permanently delete 1 ignored files? (yN) n
+  abort: removal cancelled
+  [250]
   $ hg purge -v -i
   removing file ignored
   $ ls -A
@@ -159,6 +191,10 @@
   $ hg purge -p --all
   ignored
   untracked_file
+  $ hg purge --confirm --all
+  permanently delete 1 unknown and 1 ignored files? (yN) n
+  abort: removal cancelled
+  [250]
   $ hg purge -v --all
   removing file ignored
   removing file untracked_file
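
purge is now a core command: without the extension it prompts before deleting
(--no-confirm skips the prompt), and declining aborts with the "canceled"
exit code 250. A hypothetical sketch of that flow, not the real cmdutil code:

    class CanceledError(Exception):
        exit_code = 250

    def confirm_removal(unknown, ignored, reply):
        parts = []
        if unknown:
            parts.append('%d unknown files' % unknown)
        if ignored:
            parts.append('%d ignored files' % ignored)
        prompt = 'permanently delete %s? (yN)' % ' and '.join(parts)
        if reply(prompt) != 'y':
            raise CanceledError('removal cancelled')

    try:
        confirm_removal(1, 0, reply=lambda prompt: 'n')
    except CanceledError as err:
        assert err.exit_code == 250
    else:
        raise AssertionError('expected CanceledError')
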
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-racy-mutations.t	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,102 @@
+#testcases skip-detection fail-if-detected
+
+Test situations that "should" only be reproducible:
+- on networked filesystems, or
+- a user using `hg debuglocks` to eliminate the lock file, or
+- something (that doesn't respect the lock file) writing to the .hg directory
+while we're running
+
+  $ hg init a
+  $ cd a
+
+  $ cat > "$TESTTMP/waitlock_editor.sh" <<EOF
+  >     [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
+  >     f="\${WAITLOCK_FILE}"
+  >     start=\`date +%s\`
+  >     timeout=5
+  >     while [ \\( ! -f \$f \\) -a \\( ! -L \$f \\) ]; do
+  >         now=\`date +%s\`
+  >         if [ "\`expr \$now - \$start\`" -gt \$timeout ]; then
+  >             echo "timeout: \$f was not created in \$timeout seconds (it is now \$(date +%s))"
+  >             exit 1
+  >         fi
+  >         sleep 0.1
+  >     done
+  >     if [ \$# -gt 1 ]; then
+  >         cat "\$@"
+  >     fi
+  > EOF
+  $ chmod +x "$TESTTMP/waitlock_editor.sh"
+
+Things behave differently if we don't already have a 00changelog.i file when
+this all starts, so let's make one.
+
+  $ echo r0 > r0
+  $ hg commit -qAm 'r0'
+
+Start an hg commit that will take a while
+  $ EDITOR_STARTED="$(pwd)/.editor_started"
+  $ MISCHIEF_MANAGED="$(pwd)/.mischief_managed"
+  $ JOBS_FINISHED="$(pwd)/.jobs_finished"
+
+#if fail-if-detected
+  $ cat >> .hg/hgrc << EOF
+  > [debug]
+  > revlog.verifyposition.changelog = fail
+  > EOF
+#endif
+
+  $ echo foo > foo
+  $ (WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
+  >      WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
+  >           HGEDITOR="$TESTTMP/waitlock_editor.sh" \
+  >           hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &
+
+Wait for the "editor" to actually start
+  $ WAITLOCK_FILE="${EDITOR_STARTED}" "$TESTTMP/waitlock_editor.sh"
+
+Break the locks, and make another commit.
+  $ hg debuglocks -LW
+  $ echo bar > bar
+  $ hg commit -qAm 'r2 (bar)' bar
+  $ hg debugrevlogindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 222799e2f90b 000000000000 000000000000
+       1       1 6f124f6007a0 222799e2f90b 000000000000
+
+Awaken the editor from that first commit
+  $ touch "${MISCHIEF_MANAGED}"
+And wait for it to finish
+  $ WAITLOCK_FILE="${JOBS_FINISHED}" "$TESTTMP/waitlock_editor.sh"
+
+#if skip-detection
+(Ensure there was no output)
+  $ cat .foo_commit_out
+And observe a corrupted repository -- rev 2's linkrev is 1, which should never
+happen for the changelog (the linkrev should always refer to itself).
+  $ hg debugrevlogindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 222799e2f90b 000000000000 000000000000
+       1       1 6f124f6007a0 222799e2f90b 000000000000
+       2       1 ac80e6205bb2 222799e2f90b 000000000000
+#endif
+
+#if fail-if-detected
+  $ cat .foo_commit_out
+  transaction abort!
+  rollback completed
+  note: commit message saved in .hg/last-message.txt
+  note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
+  abort: 00changelog.i: file cursor at position 249, expected 121
+And no corruption in the changelog.
+  $ hg debugrevlogindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 222799e2f90b 000000000000 000000000000
+       1       1 6f124f6007a0 222799e2f90b 000000000000
+And, because of transactions, there's none in the manifestlog either.
+  $ hg debugrevlogindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 7b7020262a56 000000000000 000000000000
+       1       1 ad3fe36d86d9 7b7020262a56 000000000000
+#endif
+
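
For reference, the shell helper above boils down to this polling loop; a
Python rendering of the same idea (wait for a file to appear, with a timeout,
so two processes can hand off control without sharing the repo lock):

    import os
    import time

    def waitlock(path, timeout=5.0, interval=0.1):
        """Block until 'path' exists (file or symlink) or raise on timeout."""
        deadline = time.monotonic() + timeout
        while not (os.path.isfile(path) or os.path.islink(path)):
            if time.monotonic() > deadline:
                raise TimeoutError(
                    '%s was not created in %s seconds' % (path, timeout))
            time.sleep(interval)
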
--- a/tests/test-rebase-collapse.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-collapse.t	Thu Mar 18 18:24:59 2021 -0400
@@ -134,7 +134,7 @@
 
   $ hg rebase --base B -m 'custom message'
   abort: message can only be specified with collapse
-  [255]
+  [10]
 
   $ cat > $TESTTMP/checkeditform.sh <<EOF
   > env | grep HGEDITFORM
@@ -180,7 +180,7 @@
 
   $ hg rebase -s C --dest H --collapse
   abort: unable to collapse on top of 3, there is more than one external parent: 1, 6
-  [255]
+  [20]
 
 Rebase and collapse - E onto H:
 
@@ -386,7 +386,7 @@
 BROKEN: should be allowed
   $ hg rebase --collapse -r 'B+D+F' -d G
   abort: unable to collapse on top of 2, there is more than one external parent: 3, 5
-  [255]
+  [20]
   $ cd ..
 
 
@@ -404,7 +404,7 @@
 
   $ hg rebase --collapse -d H -s 'B+F'
   abort: unable to collapse on top of 5, there is more than one external parent: 1, 3
-  [255]
+  [20]
   $ cd ..
 
 With internal merge:
@@ -484,7 +484,7 @@
   
   $ hg rebase --keepbranches --collapse -s 1 -d 3
   abort: cannot collapse multiple named branches
-  [255]
+  [10]
 
   $ cd ..
 
--- a/tests/test-rebase-conflicts.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-conflicts.t	Thu Mar 18 18:24:59 2021 -0400
@@ -318,10 +318,10 @@
   bundle2-input-part: total payload size 1686
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 74
-  truncating cache/rbc-revs-v1 to 56
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 3 parts total
+  truncating cache/rbc-revs-v1 to 72
   added 2 changesets with 2 changes to 1 files
   updating the branch cache
   invalid branch cache (served): tip differs
--- a/tests/test-rebase-dest.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-dest.t	Thu Mar 18 18:24:59 2021 -0400
@@ -18,7 +18,7 @@
   $ hg rebase
   abort: you must specify a destination
   (use: hg rebase -d REV)
-  [255]
+  [10]
   $ hg rebase -d 1
   rebasing 2:5db65b93a12b tip "cc"
   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5db65b93a12b-4fb789ec-rebase.hg
@@ -74,7 +74,7 @@
   $ hg pull --rebase
   abort: rebase destination required by configuration
   (use hg pull followed by hg rebase -d DEST)
-  [255]
+  [10]
 
 Setup rebase with multiple destinations
 
@@ -152,7 +152,7 @@
   > A D
   > EOS
   abort: --collapse does not work with multiple destinations
-  [255]
+  [10]
 
 Multiple destinations cannot be used with --base:
 
@@ -192,7 +192,7 @@
   > Z
   > EOS
   abort: rebase destination for f0a671a46792 is not unique
-  [255]
+  [10]
 
 Destination is an ancestor of source:
 
@@ -204,7 +204,7 @@
   > Z
   > EOS
   abort: source and destination form a cycle
-  [255]
+  [10]
 
 BUG: cycles aren't flagged correctly when --dry-run is set:
   $ rebasewithdag -s B -d 'SRC' --dry-run <<'EOS'
@@ -216,7 +216,7 @@
   > EOS
   abort: source and destination form a cycle
   starting dry-run rebase; repository will not be changed
-  [255]
+  [10]
 
 Switch roots:
 
@@ -329,7 +329,7 @@
   >   Z
   > EOS
   abort: source and destination form a cycle
-  [255]
+  [10]
 
 Detect source is ancestor of dest in runtime:
 
@@ -341,7 +341,7 @@
   >   A
   > EOS
   abort: source is ancestor of destination
-  [255]
+  [10]
 
 "Already rebased" fast path still works:
 
--- a/tests/test-rebase-interruptions.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-interruptions.t	Thu Mar 18 18:24:59 2021 -0400
@@ -350,7 +350,7 @@
   M A
   rebasing 6:a0b2430ebfb8 tip "F"
   abort: precommit hook exited with status 1
-  [255]
+  [40]
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
@@ -401,7 +401,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status 1
-  [255]
+  [40]
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
@@ -451,7 +451,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ hg tglogp
   @  7: 401ccec5e39f secret 'C'
   |
--- a/tests/test-rebase-mq.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-mq.t	Thu Mar 18 18:24:59 2021 -0400
@@ -46,14 +46,14 @@
 
   $ hg rebase -s 1 -d 3
   abort: cannot rebase onto an applied mq patch
-  [255]
+  [20]
 
 Rebase - same thing, but mq patch is default dest:
 
   $ hg up -q 1
   $ hg rebase
   abort: cannot rebase onto an applied mq patch
-  [255]
+  [20]
   $ hg up -q qtip
 
 Rebase - generate a conflict:
--- a/tests/test-rebase-named-branches.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-named-branches.t	Thu Mar 18 18:24:59 2021 -0400
@@ -247,7 +247,7 @@
   
   $ hg rebase -s 5 -d 6
   abort: source and destination form a cycle
-  [255]
+  [10]
 
   $ hg rebase -s 6 -d 5
   rebasing 6:3944801ae4ea "dev-two named branch"
--- a/tests/test-rebase-newancestor.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-newancestor.t	Thu Mar 18 18:24:59 2021 -0400
@@ -154,7 +154,7 @@
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
   abort: rebasing 4:4b019212aaf6 will include unwanted changes from 1:1d1a643d390e
-  [255]
+  [10]
   $ cd ..
 
 
@@ -314,7 +314,7 @@
   rebasing 6:b296604d9846 E "E"
   rebasing 7:caa9781e507d F tip "F"
   abort: rebasing 7:caa9781e507d will include unwanted changes from 4:d6003a550c2c or 3:c1e6b162678d
-  [255]
+  [10]
 
 The warning does not get printed if there is no unwanted change detected:
 
--- a/tests/test-rebase-obsolete.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-obsolete.t	Thu Mar 18 18:24:59 2021 -0400
@@ -560,7 +560,7 @@
   rebasing 2:b18e25de2cf5 D "D"
   rebasing 6:f15c3adaf214 F tip "F"
   abort: cannot rebase 6:f15c3adaf214 without moving at least one of its parents
-  [255]
+  [10]
 
   $ cd ..
 
@@ -948,7 +948,7 @@
   $ hg rebase -s 10 -d 12
   abort: this rebase will cause divergences from: 121d9e3bc4c6
   (to force the rebase please set experimental.evolution.allowdivergence=True)
-  [255]
+  [20]
   $ hg log -G
   @  14:73568ab6879d bar foo
   |
@@ -1152,7 +1152,7 @@
   $ hg rebase -r 'c'::'f' -d 'x'
   abort: this rebase will cause divergences from: 76be324c128b
   (to force the rebase please set experimental.evolution.allowdivergence=True)
-  [255]
+  [20]
   $ hg rebase --config experimental.evolution.allowdivergence=true -r 'c'::'f' -d 'x'
   rebasing 3:a82ac2b38757 c "c"
   rebasing 4:76be324c128b d "d"
@@ -1566,7 +1566,7 @@
   $ hg rebase -b 'desc("D")' -d 'desc("J")'
   abort: this rebase will cause divergences from: 112478962961
   (to force the rebase please set experimental.evolution.allowdivergence=True)
-  [255]
+  [20]
 
 Rebase merge where both parents have successors in destination
 
@@ -1585,7 +1585,7 @@
   note: not rebasing 5:b23a2cc00842 B "B", already in destination as 1:058c1e1fb10a D "D"
   rebasing 7:dac5d11c5a7d E tip "E"
   abort: rebasing 7:dac5d11c5a7d will include unwanted changes from 3:59c792af609c, 5:b23a2cc00842 or 2:ba2b7fa7166d, 4:a3d17304151f
-  [255]
+  [10]
   $ cd ..
 
 Rebase a non-clean merge. One parent has successor in destination, the other
@@ -1941,7 +1941,7 @@
   $ hg rebase --stop
   abort: cannot remove original changesets with unrebased descendants
   (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
-  [255]
+  [20]
   $ hg rebase --abort
   saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
   rebase aborted
@@ -2020,7 +2020,7 @@
   [240]
   $ hg rebase --stop
   abort: cannot stop in --collapse session
-  [255]
+  [20]
   $ hg rebase --abort
   rebase aborted
   $ hg diff
--- a/tests/test-rebase-parameters.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-parameters.t	Thu Mar 18 18:24:59 2021 -0400
@@ -66,7 +66,7 @@
 
   $ hg rebase --continue --collapse
   abort: cannot use collapse with continue or abort
-  [255]
+  [10]
 
   $ hg rebase --continue --dest 4
   abort: cannot specify both --continue and --dest
@@ -94,15 +94,15 @@
 
   $ hg rebase --rev 'wdir()' --dest 6
   abort: cannot rebase the working copy
-  [255]
+  [10]
 
   $ hg rebase --source 'wdir()' --dest 6
   abort: cannot rebase the working copy
-  [255]
+  [10]
 
   $ hg rebase --source 1 --source 'wdir()' --dest 6
   abort: cannot rebase the working copy
-  [255]
+  [10]
 
   $ hg rebase --source '1 & !1' --dest 8
   empty "source" revision set - nothing to rebase
@@ -508,11 +508,11 @@
 
   $ hg rebase -i
   abort: interactive history editing is supported by the 'histedit' extension (see "hg --config extensions.histedit= help -e histedit")
-  [255]
+  [10]
 
   $ hg rebase --interactive
   abort: interactive history editing is supported by the 'histedit' extension (see "hg --config extensions.histedit= help -e histedit")
-  [255]
+  [10]
 
   $ cd ..
 
--- a/tests/test-rebase-scenario-global.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rebase-scenario-global.t	Thu Mar 18 18:24:59 2021 -0400
@@ -266,14 +266,14 @@
 
   $ hg rebase -s 5 -d 6
   abort: source and destination form a cycle
-  [255]
+  [10]
 
 G onto B - merge revision with both parents not in ancestors of target:
 
   $ hg rebase -s 6 -d 1
   rebasing 6:eea13746799a "G"
   abort: cannot rebase 6:eea13746799a without moving at least one of its parents
-  [255]
+  [10]
   $ hg rebase --abort
   rebase aborted
 
--- a/tests/test-remotefilelog-clone-tree.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-remotefilelog-clone-tree.t	Thu Mar 18 18:24:59 2021 -0400
@@ -91,7 +91,6 @@
 #   flakiness here
   $ hg clone --noupdate ssh://user@dummy/shallow full 2>/dev/null
   streaming all changes
-  remote: abort: Cannot clone from a shallow repo to a full repo.
   [255]
 
 # getbundle full clone
--- a/tests/test-remotefilelog-clone.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-remotefilelog-clone.t	Thu Mar 18 18:24:59 2021 -0400
@@ -85,9 +85,9 @@
   $ TEMP_STDERR=full-clone-from-shallow.stderr.tmp
   $ hg clone --noupdate ssh://user@dummy/shallow full 2>$TEMP_STDERR
   streaming all changes
-  remote: abort: Cannot clone from a shallow repo to a full repo.
   [255]
   $ cat $TEMP_STDERR
+  remote: abort: Cannot clone from a shallow repo to a full repo.
   abort: pull failed on remote
   $ rm $TEMP_STDERR
 
--- a/tests/test-remotefilelog-prefetch.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-remotefilelog-prefetch.t	Thu Mar 18 18:24:59 2021 -0400
@@ -180,7 +180,7 @@
   x: untracked file differs
   3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ hg revert --all
 
 # Test batch fetching of lookup files during hg status
--- a/tests/test-rename-dir-merge.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rename-dir-merge.t	Thu Mar 18 18:24:59 2021 -0400
@@ -110,7 +110,7 @@
   $ hg merge 2
   b/c: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ cat b/c
   target
 but it should succeed if the content matches
@@ -294,3 +294,45 @@
   M t/t
   R a/s
   R a/t
+
+  $ cd ..
+
+
+Test that files are moved to a new directory based on the longest matching
+path prefix. dir1/ below gets renamed to dir2/, and dir1/subdir1/ gets
+renamed to dir2/subdir2/. We want the new dir1/subdir1/file5 to move to
+dir2/subdir2/ (not to dir2/subdir1/ as we would infer based on just the
+rename of dir1/ to dir2/).
+
+  $ hg init nested-renames
+  $ cd nested-renames
+  $ mkdir dir1
+  $ echo a > dir1/file1
+  $ echo b > dir1/file2
+  $ mkdir dir1/subdir1
+  $ echo c > dir1/subdir1/file3
+  $ echo d > dir1/subdir1/file4
+  $ hg ci -Aqm initial
+  $ hg mv dir1 dir2
+  moving dir1/file1 to dir2/file1
+  moving dir1/file2 to dir2/file2
+  moving dir1/subdir1/file3 to dir2/subdir1/file3
+  moving dir1/subdir1/file4 to dir2/subdir1/file4
+  $ hg mv dir2/subdir1 dir2/subdir2
+  moving dir2/subdir1/file3 to dir2/subdir2/file3
+  moving dir2/subdir1/file4 to dir2/subdir2/file4
+  $ hg ci -m 'move dir1/ to dir2/ and dir1/subdir1/ to dir2/subdir2/'
+  $ hg co 0
+  4 files updated, 0 files merged, 4 files removed, 0 files unresolved
+  $ echo e > dir1/subdir1/file5
+  $ hg ci -Aqm 'add file in dir1/subdir1/'
+  $ hg merge 1
+  5 files updated, 0 files merged, 4 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg files
+  dir2/file1
+  dir2/file2
+  dir2/subdir2/file3
+  dir2/subdir2/file4
+  dir2/subdir2/file5
+  $ cd ..
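
For this test to pass, directory-rename detection must prefer the most
specific (longest) matching source prefix. A sketch of that rule, not the
actual merge code:

    def movecandidate(path, dirmoves):
        """Map 'path' through the longest matching source-directory rename."""
        best = None
        for src, dst in dirmoves.items():
            if path.startswith(src) and (best is None
                                         or len(src) > len(best[0])):
                best = (src, dst)
        if best is None:
            return path
        src, dst = best
        return dst + path[len(src):]

    dirmoves = {'dir1/': 'dir2/', 'dir1/subdir1/': 'dir2/subdir2/'}
    assert movecandidate('dir1/subdir1/file5', dirmoves) == 'dir2/subdir2/file5'
    assert movecandidate('dir1/file1', dirmoves) == 'dir2/file1'
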
--- a/tests/test-requires.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-requires.t	Thu Mar 18 18:24:59 2021 -0400
@@ -5,7 +5,7 @@
   $ hg commit -m test
   $ rm .hg/requires
   $ hg tip
-  abort: unknown version (2) in revlog 00changelog.i
+  abort: unknown version (65535) in revlog 00changelog.i
   [50]
   $ echo indoor-pool > .hg/requires
   $ hg tip
--- a/tests/test-resolve.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-resolve.t	Thu Mar 18 18:24:59 2021 -0400
@@ -153,15 +153,15 @@
   $ hg up 0
   abort: outstanding merge conflicts
   (use 'hg resolve' to resolve)
-  [255]
+  [20]
   $ hg merge 2
   abort: outstanding merge conflicts
   (use 'hg resolve' to resolve)
-  [255]
+  [20]
   $ hg merge --force 2
   abort: outstanding merge conflicts
   (use 'hg resolve' to resolve)
-  [255]
+  [20]
 
 set up conflict-free merge
 
@@ -344,6 +344,24 @@
   $ hg resolve -l
   R file1
   R file2
+Test with :mergediff conflict markers
+  $ hg resolve --unmark
+  $ hg resolve --re-merge -t :mergediff file2
+  merging file2
+  warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+  [1]
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=abort resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  abort: conflict markers detected
+  (use --all to mark anyway)
+  [20]
+  $ hg resolve -l
+  U file1
+  U file2
 Test option value 'warn'
   $ hg resolve --unmark
   $ hg resolve -l
--- a/tests/test-revlog-raw.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-revlog-raw.py	Thu Mar 18 18:24:59 2021 -0400
@@ -51,10 +51,10 @@
 def readprocessor(self, rawtext):
     # True: the returned text could be used to verify hash
     text = rawtext[len(_extheader) :].replace(b'i', b'1')
-    return text, True, {}
+    return text, True
 
 
-def writeprocessor(self, text, sidedata):
+def writeprocessor(self, text):
     # False: the returned rawtext shouldn't be used to verify hash
     rawtext = _extheader + text.replace(b'1', b'i')
     return rawtext, False
@@ -147,6 +147,7 @@
                 b'flags': rlog.flags(r),
                 b'deltabase': rlog.node(deltaparent),
                 b'delta': rlog.revdiff(deltaparent, r),
+                b'sidedata': rlog.sidedata(r),
             }
 
         def deltaiter(self):
@@ -159,10 +160,11 @@
                 deltabase = chunkdata[b'deltabase']
                 delta = chunkdata[b'delta']
                 flags = chunkdata[b'flags']
+                sidedata = chunkdata[b'sidedata']
 
                 chain = node
 
-                yield (node, p1, p2, cs, deltabase, delta, flags)
+                yield (node, p1, p2, cs, deltabase, delta, flags, sidedata)
 
     def linkmap(lnode):
         return rlog.rev(lnode)
@@ -293,7 +295,7 @@
 
         # Verify text, rawtext, and rawsize
         if isext:
-            rawtext = writeprocessor(None, text, {})[0]
+            rawtext = writeprocessor(None, text)[0]
         else:
             rawtext = text
         if rlog.rawsize(rev) != len(rawtext):
--- a/tests/test-revlog-v2.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-revlog-v2.t	Thu Mar 18 18:24:59 2021 -0400
@@ -22,7 +22,7 @@
   $ cd empty-repo
   $ cat .hg/requires
   dotencode
-  exp-revlogv2.1
+  exp-revlogv2.2
   fncache
   sparserevlog
   store
--- a/tests/test-revlog.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-revlog.t	Thu Mar 18 18:24:59 2021 -0400
@@ -22,10 +22,10 @@
 Unknown version is rejected
 
   >>> with open('.hg/store/00changelog.i', 'wb') as fh:
-  ...     fh.write(b'\x00\x00\x00\x02') and None
+  ...     fh.write(b'\x00\x00\xbe\xef') and None
 
   $ hg log
-  abort: unknown version (2) in revlog 00changelog.i
+  abort: unknown version (48879) in revlog 00changelog.i
   [50]
 
   $ cd ..
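
Why b'\x00\x00\xbe\xef' reads back as version 48879: a revlog starts with a
big-endian 32-bit header whose high 16 bits are feature flags and low 16 bits
the format version. Sketch:

    import struct

    REVLOGV1 = 1
    FLAG_INLINE_DATA = 1 << 16
    FLAG_GENERALDELTA = 2 << 16

    def parseheader(data):
        header = struct.unpack('>I', data[:4])[0]
        return header & 0xFFFF0000, header & 0x0000FFFF  # (flags, version)

    assert parseheader(b'\x00\x00\xbe\xef') == (0, 48879)
    # a typical inline, generaldelta revlogv1 header:
    assert parseheader(b'\x00\x03\x00\x01') == (
        FLAG_INLINE_DATA | FLAG_GENERALDELTA, REVLOGV1)
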
--- a/tests/test-rhg.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rhg.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1,43 +1,50 @@
-#require rust
+#require rhg
 
-Define an rhg function that will only run if rhg exists
-  $ rhg() {
-  > if [ -f "$RUNTESTDIR/../rust/target/release/rhg" ]; then
-  >   "$RUNTESTDIR/../rust/target/release/rhg" "$@"
-  > else
-  >   echo "skipped: Cannot find rhg. Try to run cargo build in rust/rhg."
-  >   exit 80
-  > fi
-  > }
+  $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort"
 
 Unimplemented command
-  $ rhg unimplemented-command
-  error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
+  $ $NO_FALLBACK rhg unimplemented-command
+  unsupported feature: error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
   
   USAGE:
-      rhg <SUBCOMMAND>
+      rhg [OPTIONS] <SUBCOMMAND>
   
   For more information try --help
+  
+  [252]
+  $ rhg unimplemented-command --config rhg.on-unsupported=abort-silent
   [252]
 
 Finding root
-  $ rhg root
+  $ $NO_FALLBACK rhg root
   abort: no repository found in '$TESTTMP' (.hg not found)!
   [255]
 
   $ hg init repository
   $ cd repository
-  $ rhg root
+  $ $NO_FALLBACK rhg root
   $TESTTMP/repository
 
+Reading and setting configuration
+  $ echo "[ui]" >> $HGRCPATH
+  $ echo "username = user1" >> $HGRCPATH
+  $ $NO_FALLBACK rhg config ui.username
+  user1
+  $ echo "[ui]" >> .hg/hgrc
+  $ echo "username = user2" >> .hg/hgrc
+  $ $NO_FALLBACK rhg config ui.username
+  user2
+  $ $NO_FALLBACK rhg --config ui.username=user3 config ui.username
+  user3
+
 Unwritable file descriptor
-  $ rhg root > /dev/full
+  $ $NO_FALLBACK rhg root > /dev/full
   abort: No space left on device (os error 28)
   [255]
 
 Deleted repository
   $ rm -rf `pwd`
-  $ rhg root
+  $ $NO_FALLBACK rhg root
   abort: error getting current working directory: $ENOENT$
   [255]
 
@@ -52,7 +59,7 @@
   > hg commit -m "commit $i" -q
 
 Listing tracked files from root
-  $ rhg files
+  $ $NO_FALLBACK rhg files
   file1
   file2
   file3
@@ -60,13 +67,13 @@
 Listing tracked files from subdirectory
   $ mkdir -p path/to/directory
   $ cd path/to/directory
-  $ rhg files
+  $ $NO_FALLBACK rhg files
   ../../../file1
   ../../../file2
   ../../../file3
 
 Listing tracked files through broken pipe
-  $ rhg files | head -n 1
+  $ $NO_FALLBACK rhg files | head -n 1
   ../../../file1
 
 Debugging data in inline index
@@ -79,20 +86,20 @@
   >   hg add file-$i
   >   hg commit -m "Commit $i" -q
   > done
-  $ rhg debugdata -c 2
+  $ $NO_FALLBACK rhg debugdata -c 2
   8d0267cb034247ebfa5ee58ce59e22e57a492297
   test
   0 0
   file-3
   
   Commit 3 (no-eol)
-  $ rhg debugdata -m 2
+  $ $NO_FALLBACK rhg debugdata -m 2
   file-1\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
   file-2\x005d9299349fc01ddd25d0070d149b124d8f10411e (esc)
   file-3\x002661d26c649684b482d10f91960cc3db683c38b4 (esc)
 
 Debugging with full node id
-  $ rhg debugdata -c `hg log -r 0 -T '{node}'`
+  $ $NO_FALLBACK rhg debugdata -c `hg log -r 0 -T '{node}'`
   d1d1c679d3053e8926061b6f45ca52009f011e3f
   test
   0 0
@@ -108,16 +115,16 @@
   cf8b83f14ead62b374b6e91a0e9303b85dfd9ed7
   91c6f6e73e39318534dc415ea4e8a09c99cd74d6
   6ae9681c6d30389694d8701faf24b583cf3ccafe
-  $ rhg files -r cf8b83
+  $ $NO_FALLBACK rhg files -r cf8b83
   file-1
   file-2
   file-3
-  $ rhg cat -r cf8b83 file-2
+  $ $NO_FALLBACK rhg cat -r cf8b83 file-2
   2
-  $ rhg cat -r c file-2
-  abort: ambiguous revision identifier c
+  $ $NO_FALLBACK rhg cat -r c file-2
+  abort: ambiguous revision identifier: c
   [255]
-  $ rhg cat -r d file-2
+  $ $NO_FALLBACK rhg cat -r d file-2
   2
 
 Cat files
@@ -128,16 +135,44 @@
   $ echo "original content" > original
   $ hg add original
   $ hg commit -m "add original" original
-  $ rhg cat -r 0 original
+  $ $NO_FALLBACK rhg cat -r 0 original
   original content
 Cat copied file should not display copy metadata
   $ hg copy original copy_of_original
   $ hg commit -m "add copy of original"
-  $ rhg cat -r 1 copy_of_original
+  $ $NO_FALLBACK rhg cat -r 1 copy_of_original
+  original content
+
+Fallback to Python
+  $ $NO_FALLBACK rhg cat original
+  unsupported feature: `rhg cat` without `--rev` / `-r`
+  [252]
+  $ rhg cat original
   original content
 
+  $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
+  $ unset RHG_FALLBACK_EXECUTABLE
+  $ rhg cat original
+  abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
+  [255]
+  $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
+  $ export RHG_FALLBACK_EXECUTABLE
+
+  $ rhg cat original --config rhg.fallback-executable=false
+  [1]
+
+  $ rhg cat original --config rhg.fallback-executable=hg-non-existent
+  tried to fall back to a 'hg-non-existent' sub-process but got error $ENOENT$
+  unsupported feature: `rhg cat` without `--rev` / `-r`
+  [252]
+
+  $ rhg cat original --config rhg.fallback-executable=rhg
+  Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
+  unsupported feature: `rhg cat` without `--rev` / `-r`
+  [252]
+
 Requirements
-  $ rhg debugrequirements
+  $ $NO_FALLBACK rhg debugrequirements
   dotencode
   fncache
   generaldelta
@@ -146,24 +181,21 @@
   store
 
   $ echo indoor-pool >> .hg/requires
-  $ rhg files
+  $ $NO_FALLBACK rhg files
+  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
   [252]
 
-  $ rhg cat -r 1 copy_of_original
+  $ $NO_FALLBACK rhg cat -r 1 copy_of_original
+  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
   [252]
 
-  $ rhg debugrequirements
-  dotencode
-  fncache
-  generaldelta
-  revlogv1
-  sparserevlog
-  store
-  indoor-pool
+  $ $NO_FALLBACK rhg debugrequirements
+  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
+  [252]
 
   $ echo -e '\xFF' >> .hg/requires
-  $ rhg debugrequirements
-  abort: .hg/requires is corrupted
+  $ $NO_FALLBACK rhg debugrequirements
+  abort: parse error in 'requires' file
   [255]
 
 Persistent nodemap
@@ -171,7 +203,7 @@
   $ rm -rf repository
   $ hg init repository
   $ cd repository
-  $ rhg debugrequirements | grep nodemap
+  $ $NO_FALLBACK rhg debugrequirements | grep nodemap
   [1]
   $ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
   $ hg id -r tip
@@ -179,14 +211,14 @@
   $ ls .hg/store/00changelog*
   .hg/store/00changelog.d
   .hg/store/00changelog.i
-  $ rhg files -r c3ae8dec9fad
+  $ $NO_FALLBACK rhg files -r c3ae8dec9fad
   of
 
   $ cd $TESTTMP
   $ rm -rf repository
   $ hg --config format.use-persistent-nodemap=True init repository
   $ cd repository
-  $ rhg debugrequirements | grep nodemap
+  $ $NO_FALLBACK rhg debugrequirements | grep nodemap
   persistent-nodemap
   $ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
   $ hg id -r tip
@@ -198,7 +230,78 @@
   .hg/store/00changelog.n
 
 Specifying revisions by changeset ID
-  $ rhg files -r c3ae8dec9fad
+  $ $NO_FALLBACK rhg files -r c3ae8dec9fad
   of
-  $ rhg cat -r c3ae8dec9fad of
+  $ $NO_FALLBACK rhg cat -r c3ae8dec9fad of
   r5000
+
+Create a shared repository
+
+  $ echo "[extensions]"      >> $HGRCPATH
+  $ echo "share = "          >> $HGRCPATH
+
+  $ cd $TESTTMP
+  $ hg init repo1
+  $ echo a > repo1/a
+  $ hg -R repo1 commit -A -m'init'
+  adding a
+
+  $ hg share repo1 repo2
+  updating working directory
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+And check that basic rhg commands work with sharing
+
+  $ $NO_FALLBACK rhg files -R repo2
+  repo2/a
+  $ $NO_FALLBACK rhg -R repo2 cat -r 0 repo2/a
+  a
+
+Same with relative sharing
+
+  $ hg share repo2 repo3 --relative
+  updating working directory
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ $NO_FALLBACK rhg files -R repo3
+  repo3/a
+  $ $NO_FALLBACK rhg -R repo3 cat -r 0 repo3/a
+  a
+
+Same with share-safe
+
+  $ echo "[format]"         >> $HGRCPATH
+  $ echo "use-share-safe = True" >> $HGRCPATH
+
+  $ cd $TESTTMP
+  $ hg init repo4
+  $ cd repo4
+  $ echo a > a
+  $ hg commit -A -m'init'
+  adding a
+
+  $ cd ..
+  $ hg share repo4 repo5
+  updating working directory
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+And check that basic rhg commands work with sharing
+
+  $ cd repo5
+  $ $NO_FALLBACK rhg files
+  a
+  $ $NO_FALLBACK rhg cat -r 0 a
+  a
+
+The blackbox extension is supported
+
+  $ echo "[extensions]" >> $HGRCPATH
+  $ echo "blackbox =" >> $HGRCPATH
+  $ echo "[blackbox]" >> $HGRCPATH
+  $ echo "maxsize = 1" >> $HGRCPATH
+  $ $NO_FALLBACK rhg files > /dev/null
+  $ cat .hg/blackbox.log
+  ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files exited 0 after 0.??? seconds (glob)
+  $ cat .hg/blackbox.log.1
+  ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
+
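
The rhg behavior these tests pin down: on an unsupported feature, rhg aborts
with exit 252 (optionally silently), or re-executes a configured fallback
binary, refusing to recurse into itself. A Python stand-in for the Rust
logic, with the exact messages elided:

    import os
    import sys

    def on_unsupported(feature, config, argv):
        mode = os.environ.get('RHG_ON_UNSUPPORTED',
                              config.get('rhg.on-unsupported', 'abort'))
        if mode != 'fallback':
            if mode != 'abort-silent':
                sys.stderr.write('unsupported feature: %s\n' % feature)
            sys.exit(252)
        exe = config.get('rhg.fallback-executable')
        if exe is None:
            sys.stderr.write("abort: 'rhg.on-unsupported=fallback' without "
                             "'rhg.fallback-executable' set.\n")
            sys.exit(255)
        if os.path.basename(exe) == 'rhg':
            # block recursive fallback; treat as unsupported instead
            sys.stderr.write('unsupported feature: %s\n' % feature)
            sys.exit(252)
        os.execvp(exe, [exe] + argv)
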
--- a/tests/test-rollback.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-rollback.t	Thu Mar 18 18:24:59 2021 -0400
@@ -103,7 +103,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status * (glob)
-  [255]
+  [40]
   $ cat .hg/last-message.txt ; echo
   precious commit message
 
@@ -118,7 +118,7 @@
   note: commit message saved in .hg/last-message.txt
   note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: pretxncommit hook exited with status * (glob)
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   another precious commit message
 
@@ -380,7 +380,7 @@
   warn during abort
   rollback completed
   abort: pretxncommit hook exited with status 1
-  [255]
+  [40]
 
   $ hg commit -m 'commit 1'
   warn during pretxncommit
@@ -405,7 +405,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit hook exited with status 1
-  [255]
+  [40]
 
   $ hg commit -m 'commit 1'
   warn during pretxncommit
@@ -431,7 +431,7 @@
   transaction abort!
   warn during abort
   abort: pretxncommit hook exited with status 1
-  [255]
+  [40]
 
   $ hg verify
   checking changesets
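
All of the [255] -> [40] changes in this file reflect detailed exit codes:
when they are enabled (the test runner turns them on via
ui.detailed-exit-code), an abort raised by a hook exits with 40 instead of
the generic 255. A hedged sketch of telling the two apart from a wrapper
script:

    import subprocess

    # Illustrative only: 40 is the detailed exit code these tests expect
    # for hook aborts; anything else non-zero is some other failure.
    proc = subprocess.run(
        ['hg', '--config', 'ui.detailed-exit-code=yes', 'commit', '-m', 'x']
    )
    if proc.returncode == 40:
        print('aborted by a hook')
    elif proc.returncode != 0:
        print('failed with status %d' % proc.returncode)
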
--- a/tests/test-setdiscovery.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-setdiscovery.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1328,25 +1328,25 @@
   updating to branch b
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
-  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false
+  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false  --config devel.discovery.sample-size.initial=50
   comparing with b
   query 1; heads
   searching for changes
   taking quick initial sample
   searching: 2 queries
-  query 2; still undecided: 1080, sample size is: 100
+  query 2; still undecided: 1080, sample size is: 50
   sampling from both directions
   searching: 3 queries
-  query 3; still undecided: 980, sample size is: 200
+  query 3; still undecided: 1030, sample size is: 200
   sampling from both directions
   searching: 4 queries
-  query 4; still undecided: 497, sample size is: 210
+  query 4; still undecided: 547, sample size is: 210
   sampling from both directions
   searching: 5 queries
-  query 5; still undecided: 285, sample size is: 220
+  query 5; still undecided: 336, sample size is: 220
   sampling from both directions
   searching: 6 queries
-  query 6; still undecided: 63, sample size is: 63
+  query 6; still undecided: 114, sample size is: 114
   6 total queries in *.????s (glob)
   elapsed time:  * seconds (glob)
   round-trips:                   6
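
The sizes in this listing follow a simple rule: the initial quick sample is
capped at devel.discovery.sample-size.initial (50 here), full samples start at
the default of 200 and then grow by the default grow-sample.rate of 1.05
(200, 210, 220), and the last query simply takes every remaining undecided
revision (114). The real sampler also seeds samples from the heads and roots
of the undecided set, which is why the next listing with rate=1.20 is not
purely geometric; a simplified sketch of just the growth floor:

    def next_sample_size(prev, undecided, rate=1.05):
        # Grow geometrically, but never ask about more revisions than
        # are still undecided (hence the final 114/114 query).
        return min(int(prev * rate), undecided)

    sizes = [200]
    for undecided in (547, 336, 114):
        sizes.append(next_sample_size(sizes[-1], undecided))
    print(sizes)  # [200, 210, 220, 114]
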
@@ -1412,22 +1412,30 @@
       missing:                1040
   common heads: 3ee37d65064a
 
-  $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.01
+  $ hg -R a debugdiscovery b --debug --config devel.discovery.exchange-heads=false --config devel.discovery.randomize=false --config devel.discovery.grow-sample.rate=1.20 --config devel.discovery.sample-size=50
   comparing with b
   searching for changes
   sampling from both directions
-  query 1; still undecided: 1340, sample size is: 200
+  query 1; still undecided: 1340, sample size is: 50
+  sampling from both directions
+  query 2; still undecided: 995, sample size is: 60
   sampling from both directions
-  query 2; still undecided: 795, sample size is: 202
+  query 3; still undecided: 913, sample size is: 72
   sampling from both directions
-  query 3; still undecided: 525, sample size is: 204
+  query 4; still undecided: 816, sample size is: 204
+  sampling from both directions
+  query 5; still undecided: 612, sample size is: 153
   sampling from both directions
-  query 4; still undecided: 252, sample size is: 206
+  query 6; still undecided: 456, sample size is: 123
+  sampling from both directions
+  query 7; still undecided: 332, sample size is: 147
   sampling from both directions
-  query 5; still undecided: 44, sample size is: 44
-  5 total queries in *s (glob)
-  elapsed time: * seconds (glob)
-  round-trips:                   5
+  query 8; still undecided: 184, sample size is: 176
+  sampling from both directions
+  query 9; still undecided: 8, sample size is: 8
+  9 total queries in *s (glob)
+  elapsed time:  * seconds (glob)
+  round-trips:                   9
   heads summary:
     total common heads:          1
       also local heads:          0
@@ -1580,3 +1588,175 @@
       common:                    0
       missing:                   1
   common heads: 66f7d451a68b
+
+  $ cd ..
+
+
+Test debugging discovery using different subsets of the same repository
+=====================================================================
+
+remote is a local subset
+------------------------
+
+remote will be the last 25 heads of the local graph
+
+  $ cd $TESTTMP/manyheads
+  $ hg -R a debugdiscovery \
+  > --debug \
+  > --remote-as-revs 'last(heads(all()), 25)' \
+  > --config devel.discovery.randomize=false
+  query 1; heads
+  searching for changes
+  all remote heads known locally
+  elapsed time:  * seconds (glob)
+  round-trips:                   1
+  heads summary:
+    total common heads:         25
+      also local heads:         25
+      also remote heads:        25
+      both:                     25
+    local heads:               260
+      common:                   25
+      missing:                 235
+    remote heads:               25
+      common:                   25
+      unknown:                   0
+  local changesets:           1340
+    common:                    400
+      heads:                    25
+      roots:                     1
+    missing:                   940
+      heads:                   235
+      roots:                   235
+    first undecided set:       940
+      heads:                   235
+      roots:                   235
+      common:                    0
+      missing:                 940
+  common heads: 0dfd965d91c6 0fe09b60448d 14a17233ce9d 175c0a3072cf 1c51e2c80832 1e51600e0698 24eb5f9bdbab 25ce09526613 36bd00abde57 426989fdefa0 596d87362679 5dd1039ea5c0 5ef24f022278 5f230dc19419 80b39998accb 88f40688ffb5 9e37ddf8c632 abf4d55b075e b2ce801fddfe b368b6ac3ce3 c959bf2e869c c9fba6ba4e2e d783207cf649 d9a51e256f21 e3717a4e3753
+
+local is a local subset
+------------------------
+
+local will be the first 25 heads of the local graph
+
+  $ cd $TESTTMP/manyheads
+  $ hg -R a debugdiscovery b \
+  > --debug \
+  > --local-as-revs 'first(heads(all()), 25)' \
+  > --config devel.discovery.randomize=false
+  comparing with b
+  query 1; heads
+  searching for changes
+  taking quick initial sample
+  query 2; still undecided: 375, sample size is: 81
+  sampling from both directions
+  query 3; still undecided: 3, sample size is: 3
+  3 total queries in *s (glob)
+  elapsed time:  * seconds (glob)
+  round-trips:                   3
+  heads summary:
+    total common heads:          1
+      also local heads:          0
+      also remote heads:         0
+      both:                      0
+    local heads:                25
+      common:                    0
+      missing:                  25
+    remote heads:                1
+      common:                    0
+      unknown:                   1
+  local changesets:            400
+    common:                    300
+      heads:                     1
+      roots:                     1
+    missing:                   100
+      heads:                    25
+      roots:                    25
+    first undecided set:       400
+      heads:                    25
+      roots:                     1
+      common:                  300
+      missing:                 100
+  common heads: 3ee37d65064a
+
+both local and remote are subsets
+------------------------
+
+local will be the first 25 heads and remote the last 25 heads of the local graph
+
+  $ cd $TESTTMP/manyheads
+  $ hg -R a debugdiscovery \
+  > --debug \
+  > --local-as-revs 'first(heads(all()), 25)' \
+  > --remote-as-revs 'last(heads(all()), 25)' \
+  > --config devel.discovery.randomize=false
+  query 1; heads
+  searching for changes
+  taking quick initial sample
+  query 2; still undecided: 375, sample size is: 81
+  sampling from both directions
+  query 3; still undecided: 3, sample size is: 3
+  3 total queries in *s (glob)
+  elapsed time:  * seconds (glob)
+  round-trips:                   3
+  heads summary:
+    total common heads:          1
+      also local heads:          0
+      also remote heads:         0
+      both:                      0
+    local heads:                25
+      common:                    0
+      missing:                  25
+    remote heads:               25
+      common:                    0
+      unknown:                  25
+  local changesets:            400
+    common:                    300
+      heads:                     1
+      roots:                     1
+    missing:                   100
+      heads:                    25
+      roots:                    25
+    first undecided set:       400
+      heads:                    25
+      roots:                     1
+      common:                  300
+      missing:                 100
+  common heads: 3ee37d65064a
+
+Test -T json output
+-------------------
+
+  $ hg -R a debugdiscovery \
+  > -T json \
+  > --debug \
+  > --local-as-revs 'first(heads(all()), 25)' \
+  > --remote-as-revs 'last(heads(all()), 25)' \
+  > --config devel.discovery.randomize=false
+  [
+   {
+    "elapsed": *, (glob)
+    "nb-common-heads": 1,
+    "nb-common-heads-both": 0,
+    "nb-common-heads-local": 0,
+    "nb-common-heads-remote": 0,
+    "nb-common-roots": 1,
+    "nb-head-local": 25,
+    "nb-head-local-missing": 25,
+    "nb-head-remote": 25,
+    "nb-head-remote-unknown": 25,
+    "nb-ini_und": 400,
+    "nb-ini_und-common": 300,
+    "nb-ini_und-heads": 25,
+    "nb-ini_und-missing": 100,
+    "nb-ini_und-roots": 1,
+    "nb-missing-heads": 25,
+    "nb-missing-roots": 25,
+    "nb-revs": 400,
+    "nb-revs-common": 300,
+    "nb-revs-missing": 100,
+    "output": "query 1; heads\nsearching for changes\ntaking quick initial sample\nquery 2; still undecided: 375, sample size is: 81\nsampling from both directions\nquery 3; still undecided: 3, sample size is: 3\n3 total queries in *s\n", (glob)
+    "total-roundtrips": 3
+   }
+  ]
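
Because -T json emits a single JSON list, the discovery statistics are easy
to consume from scripts. A sketch mirroring the command above (same repo and
revsets; assumes it runs from the same test directory):

    import json
    import subprocess

    out = subprocess.run(
        ['hg', '-R', 'a', 'debugdiscovery', '-T', 'json', '--debug',
         '--local-as-revs', 'first(heads(all()), 25)',
         '--remote-as-revs', 'last(heads(all()), 25)'],
        capture_output=True, text=True, check=True,
    ).stdout
    stats = json.loads(out)[0]
    print(stats['total-roundtrips'], stats['nb-revs-missing'])
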
--- a/tests/test-share-bookmarks.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-share-bookmarks.t	Thu Mar 18 18:24:59 2021 -0400
@@ -102,7 +102,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ hg book bm1
 
 FYI, in contrast to above test, bmX is invisible in repo1 (= shared
@@ -127,7 +127,7 @@
   transaction abort!
   rollback completed
   abort: pretxnclose hook exited with status 1
-  [255]
+  [40]
   $ hg book bm3
 
 clean up bm2 since it's uninteresting (not shared in the vfs case and
@@ -249,7 +249,7 @@
   no changes found
   adding remote bookmark bm3
   abort: forced failure by extension
-  [255]
+  [40]
   $ hg boo
      bm1                       3:b87954705719
      bm4                       5:92793bfc8cad
--- a/tests/test-share-safe.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-share-safe.t	Thu Mar 18 18:24:59 2021 -0400
@@ -352,18 +352,27 @@
     - changelog
     - manifest
   
-  $ hg debugupgraderepo --run -q
+  $ hg debugupgraderepo --run
   upgrade will perform the following actions:
   
   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
      added: share-safe
   
+  share-safe
+     Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
+  
   processed revlogs:
     - all-filelogs
     - changelog
     - manifest
   
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  upgrading repository requirements
+  removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
   repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
 
   $ hg debugrequirements
@@ -433,7 +442,7 @@
     - changelog
     - manifest
   
-  $ hg debugupgraderepo -q --run
+  $ hg debugupgraderepo --run
   upgrade will perform the following actions:
   
   requirements
@@ -445,6 +454,12 @@
     - changelog
     - manifest
   
+  beginning upgrade...
+  repository locked and read-only
+  creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  upgrading repository requirements
+  removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
   repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
 
   $ hg debugrequirements
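
The share-safe format keeps store-level requirements in .hg/store/requires
while working-copy requirements stay in .hg/requires, which is what lets new
shares inherit the source's store requirements and config. A small sketch of
inspecting both files (path as in the test above):

    import os

    def read_requirements(repo):
        # With share-safe, store requirements move to .hg/store/requires;
        # the working-copy ones stay in .hg/requires.
        reqs = {}
        for name, rel in (('wc', '.hg/requires'),
                          ('store', '.hg/store/requires')):
            path = os.path.join(repo, rel)
            if os.path.exists(path):
                with open(path) as f:
                    reqs[name] = set(f.read().split())
        return reqs

    print(read_requirements('non-share-safe'))
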
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-sidedata-exchange.t	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,473 @@
+===========================
+Tests for sidedata exchange
+===========================
+
+Check simple exchange behavior
+==============================
+
+Pusher and pushed have sidedata enabled
+---------------------------------------
+
+  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-source/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ hg init sidedata-target --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-target/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ cd sidedata-source
+  $ echo a > a
+  $ echo b > b
+  $ echo c > c
+  $ hg commit -Am "initial"
+  adding a
+  adding b
+  adding c
+  $ echo aa > a
+  $ hg commit -m "other"
+  $ hg push -r . ../sidedata-target
+  pushing to ../sidedata-target
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 4 changes to 3 files
+  $ hg -R ../sidedata-target debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00:'
+   entry-0002 size 32
+    '\xa3\xee4v\x99\x85$\x9f\x1f\x8dKe\x0f\xc3\x9d-\xc9\xb5%[\x15=h\xe9\xf2O\xb5\xd9\x1f*\xff\xe5'
+  $ hg -R ../sidedata-target debugsidedata -m 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata -m 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00\x81'
+   entry-0002 size 32
+    '-bL\xc5\xa4uu"#\xac\x1b`,\xc0\xbc\x9d\xf5\xac\xf0\x1d\x89)2\xf8N\xb1\x14m\xce\xd7\xbc\xae'
+  $ hg -R ../sidedata-target debugsidedata a 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata a 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00\x03'
+   entry-0002 size 32
+    '\xd9\xcd\x81UvL5C\xf1\x0f\xad\x8aH\rt17Fo\x8dU!<\x8e\xae\xfc\xd1/\x06\xd4:\x80'
+  $ cd ..
+
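The entry sizes are not arbitrary: every entry-0001 is 4 bytes and decodes as
a big-endian u32 of the revision's text length ('\x00\x00\x00\x03' for file
'a' at revision 1, whose text is the 3-byte "aa\n"), and every entry-0002 is
32 bytes, the size of a sha256 digest. Assuming the helper extension hashes
the revision text (we don't reproduce ext-sidedata-5.py here), the values
could be built like this:

    import hashlib
    import struct

    text = b'aa\n'  # file 'a' at revision 1
    entry_0001 = struct.pack('>I', len(text))   # 4 bytes: b'\x00\x00\x00\x03'
    entry_0002 = hashlib.sha256(text).digest()  # 32 bytes
    print(entry_0001, len(entry_0002))
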
+Puller and pulled have sidedata enabled
+---------------------------------------
+
+  $ rm -rf sidedata-source sidedata-target
+  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-source/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ hg init sidedata-target --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-target/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ cd sidedata-source
+  $ echo a > a
+  $ echo b > b
+  $ echo c > c
+  $ hg commit -Am "initial"
+  adding a
+  adding b
+  adding c
+  $ echo aa > a
+  $ hg commit -m "other"
+  $ hg pull -R ../sidedata-target ../sidedata-source
+  pulling from ../sidedata-source
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 4 changes to 3 files
+  new changesets 05da661850d7:7ec8b4049447
+  (run 'hg update' to get a working copy)
+  $ hg -R ../sidedata-target debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00:'
+   entry-0002 size 32
+    '\xa3\xee4v\x99\x85$\x9f\x1f\x8dKe\x0f\xc3\x9d-\xc9\xb5%[\x15=h\xe9\xf2O\xb5\xd9\x1f*\xff\xe5'
+  $ hg -R ../sidedata-target debugsidedata -m 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata -m 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00\x81'
+   entry-0002 size 32
+    '-bL\xc5\xa4uu"#\xac\x1b`,\xc0\xbc\x9d\xf5\xac\xf0\x1d\x89)2\xf8N\xb1\x14m\xce\xd7\xbc\xae'
+  $ hg -R ../sidedata-target debugsidedata a 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg -R ../sidedata-target debugsidedata a 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x00\x03'
+   entry-0002 size 32
+    '\xd9\xcd\x81UvL5C\xf1\x0f\xad\x8aH\rt17Fo\x8dU!<\x8e\xae\xfc\xd1/\x06\xd4:\x80'
+  $ cd ..
+
+Now on to asymmetric configs.
+
+Pusher has sidedata enabled, pushed does not
+--------------------------------------------
+
+  $ rm -rf sidedata-source sidedata-target
+  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-source/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ hg init sidedata-target --config format.exp-use-side-data=no
+  $ cd sidedata-source
+  $ echo a > a
+  $ echo b > b
+  $ echo c > c
+  $ hg commit -Am "initial"
+  adding a
+  adding b
+  adding c
+  $ echo aa > a
+  $ hg commit -m "other"
+  $ hg push -r . ../sidedata-target --traceback
+  pushing to ../sidedata-target
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 4 changes to 3 files
+  $ hg -R ../sidedata-target log -G
+  o  changeset:   1:7ec8b4049447
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     other
+  |
+  o  changeset:   0:05da661850d7
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     initial
+  
+
+  $ hg -R ../sidedata-target debugsidedata -c 0
+  $ hg -R ../sidedata-target debugsidedata -c 1 -v
+  $ hg -R ../sidedata-target debugsidedata -m 0
+  $ hg -R ../sidedata-target debugsidedata -m 1 -v
+  $ hg -R ../sidedata-target debugsidedata a 0
+  $ hg -R ../sidedata-target debugsidedata a 1 -v
+  $ cd ..
+
+Pulled has sidedata enabled, puller does not
+--------------------------------------------
+
+  $ rm -rf sidedata-source sidedata-target
+  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ cat << EOF >> sidedata-source/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
+  > EOF
+  $ hg init sidedata-target --config format.exp-use-side-data=no
+  $ cd sidedata-source
+  $ echo a > a
+  $ echo b > b
+  $ echo c > c
+  $ hg commit -Am "initial"
+  adding a
+  adding b
+  adding c
+  $ echo aa > a
+  $ hg commit -m "other"
+  $ hg pull -R ../sidedata-target ../sidedata-source
+  pulling from ../sidedata-source
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 4 changes to 3 files
+  new changesets 05da661850d7:7ec8b4049447
+  (run 'hg update' to get a working copy)
+  $ hg -R ../sidedata-target log -G
+  o  changeset:   1:7ec8b4049447
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     other
+  |
+  o  changeset:   0:05da661850d7
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     initial
+  
+
+  $ hg -R ../sidedata-target debugsidedata -c 0
+  $ hg -R ../sidedata-target debugsidedata -c 1 -v
+  $ hg -R ../sidedata-target debugsidedata -m 0
+  $ hg -R ../sidedata-target debugsidedata -m 1 -v
+  $ hg -R ../sidedata-target debugsidedata a 0
+  $ hg -R ../sidedata-target debugsidedata a 1 -v
+  $ cd ..
+
+
+Check sidedata exchange with on-the-fly generation and removal
+==============================================================
+
+(Push) Target has strict superset of the source
+-----------------------------------------------
+
+  $ hg init source-repo --config format.exp-use-side-data=yes
+  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ cat << EOF >> target-repo/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata.py
+  > EOF
+  $ cd source-repo
+  $ echo aaa > a
+  $ hg add a
+  $ hg commit -m a
+  $ echo aaa > b
+  $ hg add b
+  $ hg commit -m b
+  $ echo xxx >> a
+  $ hg commit -m aa
+
+No sidedata is generated in the source
+  $ hg debugsidedata -c 0
+
+Check that sidedata capabilities are advertised
+  $ hg debugcapabilities ../target-repo | grep sidedata
+    exp-wanted-sidedata=1,2
+
+We expect the client to abort the push since it's not capable of generating
+what the server is asking for
+  $ hg push -r . ../target-repo
+  pushing to ../target-repo
+  abort: cannot push: required sidedata category not supported by this client: '1'
+  [255]
+
+Add the required capabilities
+  $ cat << EOF >> .hg/hgrc
+  > [extensions]
+  > testsidedata2=$TESTDIR/testlib/ext-sidedata-2.py
+  > EOF
+
+We expect the target to have sidedata that was generated by the source on push
+  $ hg push -r . ../target-repo
+  pushing to ../target-repo
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 2 files
+  $ cd ../target-repo
+  $ hg debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x006'
+   entry-0002 size 32
+    '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+  $ hg debugsidedata -m 2
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata a 1
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ cd ..
+
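The negotiation underneath these scenarios is set arithmetic over sidedata
categories: the receiver advertises exp-wanted-sidedata, the sender must be
able to compute whatever it lacks (else the abort shown above), and anything
it carries that the receiver doesn't want is stripped in transit. A sketch of
that logic (illustrative, not the bundle code itself):

    def plan_sidedata(present, wanted, computable):
        # Wanted-but-absent categories must be computable by the sender;
        # present-but-unwanted ones are dropped on the way out.
        missing = wanted - present
        if not missing <= computable:
            raise ValueError(
                'required sidedata category not supported: %r'
                % sorted(missing - computable))
        return {'generate': missing, 'remove': present - wanted}

    # Target wants {1, 2}; the source has none but can compute both:
    print(plan_sidedata(set(), {1, 2}, {1, 2}))
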
+(Push) Difference is not subset/superset
+----------------------------------------
+
+Compared to the target, the source has one sidedata category in common, one
+missing, and one extra.
+
+  $ rm -rf source-repo target-repo
+  $ hg init source-repo --config format.exp-use-side-data=yes
+  $ cat << EOF >> source-repo/.hg/hgrc
+  > [extensions]
+  > testsidedata3=$TESTDIR/testlib/ext-sidedata-3.py
+  > EOF
+  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ cat << EOF >> target-repo/.hg/hgrc
+  > [extensions]
+  > testsidedata4=$TESTDIR/testlib/ext-sidedata-4.py
+  > EOF
+  $ cd source-repo
+  $ echo aaa > a
+  $ hg add a
+  $ hg commit -m a
+  $ echo aaa > b
+  $ hg add b
+  $ hg commit -m b
+  $ echo xxx >> a
+  $ hg commit -m aa
+
+Check that sidedata capabilities are advertised
+  $ hg debugcapabilities . | grep sidedata
+    exp-wanted-sidedata=1,2
+  $ hg debugcapabilities ../target-repo | grep sidedata
+    exp-wanted-sidedata=2,3
+
+Sidedata is generated in the source, but only the right categories (entry-0001 and entry-0002)
+  $ hg debugsidedata -c 0
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x006'
+   entry-0002 size 32
+    '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+  $ hg debugsidedata -m 2
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata a 1
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+
+
+We expect the target to have the sidedata generated by the source on push,
+with the categories that the target does not support stripped out.
+Namely, we expect entry-0002 (only exchanged) and entry-0003 (generated),
+but not entry-0001.
+
+  $ hg push -r . ../target-repo --traceback
+  pushing to ../target-repo
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 2 files
+  $ cd ../target-repo
+  $ hg log -G
+  o  changeset:   2:40f977031323
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     aa
+  |
+  o  changeset:   1:2707720c6597
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     b
+  |
+  o  changeset:   0:7049e48789d7
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     a
+  
+  $ hg debugsidedata -c 0
+  2 sidedata entries
+   entry-0002 size 32
+   entry-0003 size 48
+  $ hg debugsidedata -c 1 -v
+  2 sidedata entries
+   entry-0002 size 32
+    '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+   entry-0003 size 48
+    '\x87\xcf\xdfI/\xb5\xed\xeaC\xc1\xf0S\xf3X\x1c\xcc\x00m\xee\xe6#\xc1\xe3\xcaB8Fk\x82e\xfc\xc01\xf6\xb7\xb9\xb3([\xf6D\xa6\xcf\x9b\xea\x11{\x08'
+  $ hg debugsidedata -m 2
+  2 sidedata entries
+   entry-0002 size 32
+   entry-0003 size 48
+  $ hg debugsidedata a 1
+  2 sidedata entries
+   entry-0002 size 32
+   entry-0003 size 48
+  $ cd ..
+
+(Pull) Target has strict superset of the source
+-----------------------------------------------
+
+  $ rm -rf source-repo target-repo
+  $ hg init source-repo --config format.exp-use-side-data=yes
+  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ cat << EOF >> target-repo/.hg/hgrc
+  > [extensions]
+  > testsidedata=$TESTDIR/testlib/ext-sidedata.py
+  > EOF
+  $ cd source-repo
+  $ echo aaa > a
+  $ hg add a
+  $ hg commit -m a
+  $ echo aaa > b
+  $ hg add b
+  $ hg commit -m b
+  $ echo xxx >> a
+  $ hg commit -m aa
+
+No sidedata is generated in the source
+  $ hg debugsidedata -c 0
+
+Check that sidedata capabilities are advertised
+  $ hg debugcapabilities ../target-repo | grep sidedata
+    exp-wanted-sidedata=1,2
+
+  $ cd ../target-repo
+
+Add the required capabilities
+  $ cat << EOF >> .hg/hgrc
+  > [extensions]
+  > testsidedata2=$TESTDIR/testlib/ext-sidedata-2.py
+  > EOF
+
+We expect the target to have sidedata that it generated on-the-fly during pull
+  $ hg pull -r . ../source-repo  --traceback
+  pulling from ../source-repo
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 2 files
+  new changesets 7049e48789d7:40f977031323
+  (run 'hg update' to get a working copy)
+  $ hg debugsidedata -c 0 --traceback
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata -c 1 -v --traceback
+  2 sidedata entries
+   entry-0001 size 4
+    '\x00\x00\x006'
+   entry-0002 size 32
+    '\x98\t\xf9\xc4v\xf0\xc5P\x90\xf7wRf\xe8\xe27e\xfc\xc1\x93\xa4\x96\xd0\x1d\x97\xaaG\x1d\xd7t\xfa\xde'
+  $ hg debugsidedata -m 2
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ hg debugsidedata a 1
+  2 sidedata entries
+   entry-0001 size 4
+   entry-0002 size 32
+  $ cd ..
--- a/tests/test-sidedata.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-sidedata.t	Thu Mar 18 18:24:59 2021 -0400
@@ -56,9 +56,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -69,9 +69,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -88,9 +88,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:          yes     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -101,9 +101,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:          yes     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
--- a/tests/test-simplemerge.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-simplemerge.py	Thu Mar 18 18:24:59 2021 -0400
@@ -141,8 +141,6 @@
         """No conflicts because nothing changed"""
         m3 = Merge3([b'aaa', b'bbb'], [b'aaa', b'bbb'], [b'aaa', b'bbb'])
 
-        self.assertEqual(m3.find_unconflicted(), [(0, 2)])
-
         self.assertEqual(
             list(m3.find_sync_regions()),
             [(0, 2, 0, 2, 0, 2), (2, 2, 2, 2, 2, 2)],
@@ -189,8 +187,6 @@
             [b'aaa', b'bbb'], [b'aaa', b'111', b'bbb'], [b'aaa', b'bbb']
         )
 
-        self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
         self.assertEqual(
             list(m3.find_sync_regions()),
             [(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 1, 2), (2, 2, 3, 3, 2, 2)],
@@ -271,8 +267,6 @@
             [b'aaa\n', b'222\n', b'bbb\n'],
         )
 
-        self.assertEqual(m3.find_unconflicted(), [(0, 1), (1, 2)])
-
         self.assertEqual(
             list(m3.find_sync_regions()),
             [(0, 1, 0, 1, 0, 1), (1, 2, 2, 3, 2, 3), (2, 2, 3, 3, 3, 3)],
@@ -323,8 +317,6 @@
             [b'aaa', b'222', b'bbb'],
         )
 
-        self.assertEqual(m3.find_unconflicted(), [(0, 1), (2, 3)])
-
         self.assertEqual(
             list(m3.find_sync_regions()),
             [(0, 1, 0, 1, 0, 1), (2, 3, 2, 3, 2, 3), (3, 3, 3, 3, 3, 3)],
@@ -338,8 +330,6 @@
             [b'aaa', b'222', b'222', b'222', b'222', b'bbb'],
         )
 
-        self.assertEqual(m3.find_unconflicted(), [(0, 1), (3, 4)])
-
         self.assertEqual(
             list(m3.find_sync_regions()),
             [(0, 1, 0, 1, 0, 1), (3, 4, 4, 5, 5, 6), (4, 4, 5, 5, 6, 6)],
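
The removed assertions exercised find_unconflicted(), which is gone from
Merge3; find_sync_regions() remains, returning 6-tuples of
(base_start, base_end, a_start, a_end, b_start, b_end). A short usage sketch
(assumes a Mercurial checkout on sys.path, as in this test module):

    from mercurial.simplemerge import Merge3

    m3 = Merge3([b'aaa', b'bbb'],          # base
                [b'aaa', b'111', b'bbb'],  # a
                [b'aaa', b'bbb'])          # b
    for region in m3.find_sync_regions():
        # e.g. (1, 2, 2, 3, 1, 2) is the shared 'bbb'; the final
        # zero-length region is the end-of-file sentinel.
        print(region)
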
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-batch.t	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,15 @@
+  $ hg init a
+  $ cd a
+  $ touch a; hg commit -qAm_
+  $ hg bookmark $(for i in $($TESTDIR/seq.py 0 20); do echo b$i; done)
+  $ hg clone . ../b -q
+  $ cd ../b
+
+Checking that when looking up multiple bookmarks in one go, if one of them
+fails (thus causing the sshpeer to be stopped), the errors from the
+further lookups don't result in tracebacks.
+
+  $ hg pull -r b0 -r nosuchbookmark $(for i in $($TESTDIR/seq.py 1 20); do echo -r b$i; done) -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/$(pwd)/../a
+  pulling from ssh://user@dummy/$TESTTMP/b/../a
+  abort: unknown revision 'nosuchbookmark'
+  [255]
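
The point of the test is that one failing lookup in a batched call tears down
the sshpeer, and the already-queued lookups must then fail quietly instead of
printing tracebacks. Generically, that is a drain-after-first-failure
discipline; a sketch (not sshpeer's actual code):

    def drain_batch(pending):
        # Resolve every queued result; after the first failure the peer
        # is gone, so keep that error and swallow the follow-on ones.
        first_error = None
        for resolve in pending:
            try:
                resolve()
            except Exception as exc:
                if first_error is None:
                    first_error = exc
        if first_error is not None:
            raise first_error
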
--- a/tests/test-ssh-bundle1.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-ssh-bundle1.t	Thu Mar 18 18:24:59 2021 -0400
@@ -482,7 +482,7 @@
   sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
-  remote: 463 (sshv1 !)
+  remote: 444 (sshv1 !)
   protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
--- a/tests/test-ssh-proto-unbundle.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-ssh-proto-unbundle.t	Thu Mar 18 18:24:59 2021 -0400
@@ -56,8 +56,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -109,8 +109,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -235,8 +235,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -293,8 +293,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -359,8 +359,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -418,8 +418,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -485,8 +485,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -543,8 +543,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -609,8 +609,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -668,8 +668,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -735,8 +735,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -796,8 +796,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -865,8 +865,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -923,8 +923,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -989,8 +989,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1050,8 +1050,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1119,8 +1119,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1180,8 +1180,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1255,8 +1255,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1314,8 +1314,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1382,8 +1382,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1441,8 +1441,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1511,8 +1511,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1572,8 +1572,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1650,8 +1650,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1715,8 +1715,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1788,8 +1788,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1843,8 +1843,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1918,8 +1918,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1977,8 +1977,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
--- a/tests/test-ssh-proto.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-ssh-proto.t	Thu Mar 18 18:24:59 2021 -0400
@@ -64,7 +64,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -86,8 +86,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 `hg debugserve --sshstdio` works
@@ -96,7 +96,7 @@
   $ hg debugserve --sshstdio << EOF
   > hello
   > EOF
-  463
+  444
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 I/O logging works
@@ -106,24 +106,24 @@
   > EOF
   e> flush() -> None
   o> write(4) -> 4:
-  o>     463\n
-  o> write(463) -> 463:
+  o>     444\n
+  o> write(444) -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
-  463
+  444
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> flush() -> None
 
   $ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
   > hello
   > EOF
-  463
+  444
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
   $ cat $TESTTMP/io
   e> flush() -> None
   o> write(4) -> 4:
-  o>     463\n
-  o> write(463) -> 463:
+  o>     444\n
+  o> write(444) -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> flush() -> None
 
@@ -149,8 +149,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -187,7 +187,7 @@
   remote: banner: line 7
   remote: banner: line 8
   remote: banner: line 9
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -245,8 +245,8 @@
   o> readline() -> 15:
   o>     banner: line 9\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -297,12 +297,12 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
+  o>     444\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-  o> readline() -> 463:
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -316,7 +316,7 @@
   sending hello command
   sending between command
   remote: 0
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -365,8 +365,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -390,7 +390,7 @@
   remote: 0
   remote: 0
   remote: 0
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -447,8 +447,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -494,8 +494,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -539,8 +539,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -609,8 +609,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Incomplete dictionary send
@@ -691,8 +691,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -725,8 +725,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -768,8 +768,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -797,8 +797,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(105) -> 105:
   i>     between\n
@@ -838,8 +838,8 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -887,8 +887,8 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -914,7 +914,7 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     463\n
+  o>     444\n
 
 Send an upgrade request to a server that doesn't support that command
 
@@ -943,8 +943,8 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -962,7 +962,7 @@
   sending hello command
   sending between command
   remote: 0
-  remote: 463
+  remote: 444
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
@@ -1005,8 +1005,8 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     462\n
-  o> readline() -> 463:
+  o>     443\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
   $ cd ..
@@ -1081,7 +1081,6 @@
     remote-changegroup
       http
       https
-    rev-branch-cache
     stream
       v2
 
@@ -1114,14 +1113,14 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     462\n
-  o> readline() -> 463:
+  o>     443\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     447\n
-  o> readline() -> 447:
+  o>     428\n
+  o> readline() -> 428:
   o>     capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Multiple upgrades are not allowed
@@ -1152,8 +1151,8 @@
   o> readline() -> 44:
   o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     462\n
-  o> readline() -> 463:
+  o>     443\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(45) -> 45:
   i>     upgrade another-token proto=irrelevant\n
@@ -1224,8 +1223,8 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
@@ -1343,8 +1342,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1381,8 +1380,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1431,8 +1430,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1461,8 +1460,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1492,8 +1491,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1525,8 +1524,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1559,8 +1558,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1595,8 +1594,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1634,8 +1633,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1674,8 +1673,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -1726,8 +1725,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1759,8 +1758,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1809,8 +1808,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1847,8 +1846,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1886,8 +1885,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1922,8 +1921,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1959,8 +1958,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -1992,8 +1991,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -2030,8 +2029,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -2071,8 +2070,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -2137,8 +2136,8 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     463\n
-  o> readline() -> 463:
+  o>     444\n
+  o> readline() -> 444:
   o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
@@ -2177,8 +2176,8 @@
   o> readline() -> 62:
   o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     462\n
-  o> read(462) -> 462: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     443\n
+  o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending batch with 3 sub-commands
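
Aside: the uniform 19-byte drop in every capability payload above (463 -> 444
for sshv1, 462 -> 443 after the v2 upgrade banner, 447 -> 428 without
protocaps) is consistent with the rev-branch-cache bundle2 capability being
removed elsewhere in this changeset. As a minimal sketch of the sshv1 framing
these tests exercise -- a decimal byte count on its own line, then exactly
that many bytes of "capabilities: ..." payload, trailing newline included in
the count -- assuming only a file-like transport (the data below is toy data,
not a real server reply):

  import io

  def read_capabilities(pipe):
      # The server answers "hello" with a length line, then a
      # "capabilities: ..." line of exactly that many bytes.
      count = int(pipe.readline())        # e.g. b"444\n"
      line = pipe.read(count)             # b"capabilities: ...\n"
      assert line.startswith(b"capabilities: ")
      return line[len(b"capabilities: "):].rstrip(b"\n").split(b" ")

  # Toy transport standing in for the server's stdout:
  payload = b"capabilities: batch branchmap unbundlehash\n"
  pipe = io.BytesIO(b"%d\n%s" % (len(payload), payload))
  print(read_capabilities(pipe))  # [b'batch', b'branchmap', b'unbundlehash']
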
--- a/tests/test-ssh.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-ssh.t	Thu Mar 18 18:24:59 2021 -0400
@@ -390,6 +390,7 @@
   abort: destination 'a repo' is not empty
   [10]
 
+#if no-rhg

 Make sure hg is really paranoid in serve --stdio mode. It used to be
 possible to get a debugger REPL by specifying a repo named --debugger.
   $ hg -R --debugger serve --stdio
@@ -402,6 +403,27 @@
   $ hg -R narf serv --stdio
   abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
   [255]
+#else
+rhg aborts early on -R without a repository at that path
+  $ hg -R --debugger serve --stdio
+  abort: potentially unsafe serve --stdio invocation: ['-R', '--debugger', 'serve', '--stdio'] (missing-correct-output !)
+  abort: repository --debugger not found (known-bad-output !)
+  [255]
+  $ hg -R --config=ui.debugger=yes serve --stdio
+  abort: potentially unsafe serve --stdio invocation: ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'] (missing-correct-output !)
+  abort: repository --config=ui.debugger=yes not found (known-bad-output !)
+  [255]
+  $ hg -R narf serv --stdio
+  abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio'] (missing-correct-output !)
+  abort: repository narf not found (known-bad-output !)
+  [255]
+If the repo does exist, rhg finds an unsupported command and falls back to
+Python, which still does the right thing.
+  $ hg init narf
+  $ hg -R narf serv --stdio
+  abort: potentially unsafe serve --stdio invocation: ['-R', 'narf', 'serv', '--stdio']
+  [255]
+#endif
 
 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
@@ -518,7 +540,7 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 463 (sshv1 !)
+  remote: 444 (sshv1 !)
   protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
   remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
@@ -537,7 +559,7 @@
   no changes found
   devel-peer-request: getbundle
   devel-peer-request:   bookmarks: 1 bytes
-  devel-peer-request:   bundlecaps: 289 bytes
+  devel-peer-request:   bundlecaps: 270 bytes
   devel-peer-request:   cg: 1 bytes
   devel-peer-request:   common: 122 bytes
   devel-peer-request:   heads: 122 bytes
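
Aside: the "potentially unsafe serve --stdio invocation" abort exercised in
the rhg branch above comes from hg refusing anything but the exact argument
shape a restricted SSH account is expected to pass. A rough illustration of
that allowlist idea (a hypothetical helper, not Mercurial's actual dispatch
code):

  def is_safe_stdio_invocation(args):
      # Accept only: -R <repo> serve --stdio, where <repo> cannot be
      # mistaken for an option and "serve" is spelled out in full.
      return (
          len(args) == 4
          and args[0] == '-R'
          and not args[1].startswith('-')
          and args[2:] == ['serve', '--stdio']
      )

  assert is_safe_stdio_invocation(['-R', 'narf', 'serve', '--stdio'])
  assert not is_safe_stdio_invocation(['-R', '--debugger', 'serve', '--stdio'])
  assert not is_safe_stdio_invocation(
      ['-R', '--config=ui.debugger=yes', 'serve', '--stdio'])
  assert not is_safe_stdio_invocation(['-R', 'narf', 'serv', '--stdio'])
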
--- a/tests/test-static-http.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-static-http.t	Thu Mar 18 18:24:59 2021 -0400
@@ -231,8 +231,6 @@
   /.hg/bookmarks
   /.hg/bookmarks.current
   /.hg/cache/hgtagsfnodes1
-  /.hg/cache/rbc-names-v1
-  /.hg/cache/rbc-revs-v1
   /.hg/dirstate
   /.hg/requires
   /.hg/store/00changelog.i
@@ -248,8 +246,6 @@
   /remote-with-names/.hg/bookmarks.current
   /remote-with-names/.hg/cache/branch2-served
   /remote-with-names/.hg/cache/hgtagsfnodes1
-  /remote-with-names/.hg/cache/rbc-names-v1
-  /remote-with-names/.hg/cache/rbc-revs-v1
   /remote-with-names/.hg/cache/tags2-served
   /remote-with-names/.hg/dirstate
   /remote-with-names/.hg/localtags
@@ -266,7 +262,6 @@
   /remote/.hg/cache/branch2-served
   /remote/.hg/cache/hgtagsfnodes1
   /remote/.hg/cache/rbc-names-v1
-  /remote/.hg/cache/rbc-revs-v1
   /remote/.hg/cache/tags2-served
   /remote/.hg/dirstate
   /remote/.hg/localtags
@@ -288,8 +283,6 @@
   /sub/.hg/bookmarks
   /sub/.hg/bookmarks.current
   /sub/.hg/cache/hgtagsfnodes1
-  /sub/.hg/cache/rbc-names-v1
-  /sub/.hg/cache/rbc-revs-v1
   /sub/.hg/dirstate
   /sub/.hg/requires
   /sub/.hg/store/00changelog.i
--- a/tests/test-strip.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-strip.t	Thu Mar 18 18:24:59 2021 -0400
@@ -427,7 +427,7 @@
   strip failed, unrecovered changes stored in '$TESTTMP/test/.hg/strip-backup/*-temp.hg' (glob)
   (fix the problem, then recover the changesets with "hg unbundle '$TESTTMP/test/.hg/strip-backup/*-temp.hg'") (glob)
   abort: pretxnchangegroup.bad hook exited with status 1
-  [255]
+  [40]
   $ restore
   $ hg log -G
   o  changeset:   4:443431ffac4f
--- a/tests/test-tag.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-tag.t	Thu Mar 18 18:24:59 2021 -0400
@@ -290,7 +290,7 @@
   $ rm -f .hg/last-message.txt
   $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
   abort: pretag.test-saving-lastmessage hook exited with status 1
-  [255]
+  [40]
   $ test -f .hg/last-message.txt
   [1]
 
@@ -325,7 +325,7 @@
   note: commit message saved in .hg/last-message.txt
   note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: pretxncommit.unexpectedabort hook exited with status 1
-  [255]
+  [40]
   $ cat .hg/last-message.txt
   custom tag message
   second line
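
Aside: the recurring [255] -> [40]/[10]/[20] updates in this and the
surrounding test files track Mercurial's detailed exit codes. Read straight
off the expected outputs in this changeset, so a partial map rather than the
full table:

  # Partial map of detailed exit codes, as exercised by these tests:
  DETAILED_EXIT_CODES = {
      10: "input error (e.g. uncommitting a file that does not exist)",
      20: "state error (e.g. untracked files differ, conflicting changes)",
      40: "a hook aborted the operation (pretag, pretxncommit, ...)",
      255: "generic abort (the old catch-all exit code)",
  }

  def describe_exit(status):
      return DETAILED_EXIT_CODES.get(status, "not covered by these tests")

  print(describe_exit(40))
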
--- a/tests/test-tags.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-tags.t	Thu Mar 18 18:24:59 2021 -0400
@@ -104,7 +104,7 @@
   0010: ff ff ff ff ff ff ff ff b9 15 46 36 26 b7 b4 a7 |..........F6&...|
   0020: 73 e0 9e e3 c5 2f 51 0e 19 e0 5e 1f f9 66 d8 59 |s..../Q...^..f.Y|
   $ hg debugtagscache
-  0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing/invalid
+  0 acb14030fe0a21b60322c440ad2d20cf7685a376 missing
   1 b9154636be938d3d431e75a7c906504a079bfe07 26b7b4a773e09ee3c52f510e19e05e1ff966d859
 
 Repeat with cold tag cache:
@@ -381,7 +381,7 @@
 
   $ hg debugtagscache | tail -2
   4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
-  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing/invalid
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 missing
   $ hg tags
   tip                                5:8dbfe60eff30
   bar                                1:78391a272241
@@ -389,6 +389,77 @@
   4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
   5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8af31de17fab7422878ee5a2dadbc943d
 
+If the 4 bytes of node hash for a record don't match an existing node, the entry
+is flagged as invalid.
+
+  >>> import os
+  >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
+  ...     fp.seek(-24, os.SEEK_END) and None
+  ...     fp.write(b'\xde\xad') and None
+
+  $ f --size --hexdump .hg/cache/hgtagsfnodes1
+  .hg/cache/hgtagsfnodes1: size=144
+  0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+  0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
+  0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
+  0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
+  0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
+  0070: 78 ee 5a 2d ad bc 94 3d de ad e6 0e 0c 04 f2 a8 |x.Z-...=........|
+  0080: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
+
+  $ hg debugtagscache | tail -2
+  4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 invalid
+
+  $ hg tags
+  tip                                5:8dbfe60eff30
+  bar                                1:78391a272241
+
+BUG: If the filenode part of an entry in hgtagsfnodes1 is corrupt and
+tags2-visible is missing, `hg tags` aborts.  Corrupting the leading 4 bytes of
+node hash (as above) doesn't seem to trigger the issue.  Also note that the
+debug command hides the corruption, both with and without tags2-visible.
+
+  $ mv .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1.bak
+  $ hg debugupdatecaches
+
+  >>> import os
+  >>> with open(".hg/cache/hgtagsfnodes1", "rb+") as fp:
+  ...     fp.seek(-16, os.SEEK_END) and None
+  ...     fp.write(b'\xde\xad') and None
+
+  $ f --size --hexdump .hg/cache/hgtagsfnodes1
+  .hg/cache/hgtagsfnodes1: size=144
+  0000: bb d1 79 df 00 00 00 00 00 00 00 00 00 00 00 00 |..y.............|
+  0010: 00 00 00 00 00 00 00 00 78 39 1a 27 0c 04 f2 a8 |........x9.'....|
+  0020: af 31 de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |.1....B(x.Z-...=|
+  0030: 7a 94 12 77 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |z..w.....1....B(|
+  0040: 78 ee 5a 2d ad bc 94 3d 6f a4 50 21 7d 3b 71 8c |x.Z-...=o.P!};q.|
+  0050: 96 4e f3 7b 89 e5 50 eb da fd 57 89 e7 6c e1 b0 |.N.{..P...W..l..|
+  0060: 0c 19 2d 7d 0c 04 f2 a8 af 31 de 17 fa b7 42 28 |..-}.....1....B(|
+  0070: 78 ee 5a 2d ad bc 94 3d 8d bf e6 0e 0c 04 f2 a8 |x.Z-...=........|
+  0080: de ad de 17 fa b7 42 28 78 ee 5a 2d ad bc 94 3d |......B(x.Z-...=|
+
+  $ hg debugtagscache | tail -2
+  4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
+
+  $ rm -f .hg/cache/tags2-visible
+  $ hg debugtagscache | tail -2
+  4 0c192d7d5e6b78a714de54a2e9627952a877e25a 0c04f2a8af31de17fab7422878ee5a2dadbc943d
+  5 8dbfe60eff306a54259cfe007db9e330e7ecf866 0c04f2a8deadde17fab7422878ee5a2dadbc943d (unknown node)
+
+  $ hg tags
+  tip                                5:8dbfe60eff30
+  bar                                1:78391a272241
+
+BUG: Unless this file is restored, the `hg tags` in the next unix-permissions
+conditional will fail: "abort: data/.hgtags.i@0c04f2a8dead: no match found"
+
+  $ mv .hg/cache/hgtagsfnodes1.bak .hg/cache/hgtagsfnodes1
+
 #if unix-permissions no-root
 Errors writing to .hgtags fnodes cache are silently ignored
 
@@ -405,7 +476,7 @@
   $ hg blackbox -l 6
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
-  1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 3/4 cache hits/lookups in * seconds (glob)
+  1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
@@ -420,7 +491,7 @@
   $ hg blackbox -l 6
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
-  1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 3/4 cache hits/lookups in * seconds (glob)
+  1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
   1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
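
Aside: the corruption experiments added above rely on hgtagsfnodes1 being an
array of fixed 24-byte records, one per revision: a 4-byte changelog node
prefix followed by the 20-byte .hgtags filenode (so seek(-24) hits the last
record's prefix and seek(-16) lands inside its filenode). A small sketch of
that layout, assuming only the record format visible in the hexdumps and a
current directory at a repository root:

  import struct

  RECORD = struct.Struct('>4s20s')  # node-hash prefix + .hgtags filenode

  def iter_fnodes_records(data):
      # Record i belongs to revision i; a prefix that no longer matches
      # any known node is what debugtagscache reports as "invalid".
      for rev in range(len(data) // RECORD.size):
          prefix, fnode = RECORD.unpack_from(data, rev * RECORD.size)
          yield rev, prefix.hex(), fnode.hex()

  with open('.hg/cache/hgtagsfnodes1', 'rb') as fp:
      for rev, prefix, fnode in iter_fnodes_records(fp.read()):
          print(rev, prefix, fnode)
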
--- a/tests/test-transplant.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-transplant.t	Thu Mar 18 18:24:59 2021 -0400
@@ -1091,7 +1091,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.abort hook exited with status 1
-  [255]
+  [40]
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
   > pretxncommit.abort = !
--- a/tests/test-unamend.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-unamend.t	Thu Mar 18 18:24:59 2021 -0400
@@ -39,7 +39,7 @@
 
   $ hg unamend
   abort: changeset must have one predecessor, found 0 predecessors
-  [255]
+  [10]
 
 Unamend on clean wdir and tip
 
--- a/tests/test-uncommit.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-uncommit.t	Thu Mar 18 18:24:59 2021 -0400
@@ -114,12 +114,12 @@
   $ hg uncommit nothinghere
   abort: cannot uncommit "nothinghere"
   (file does not exist)
-  [255]
+  [10]
   $ hg status
   $ hg uncommit file-abc
   abort: cannot uncommit "file-abc"
   (file was not changed in working directory parent)
-  [255]
+  [10]
   $ hg status
 
 Try partial uncommit, also moves bookmark
@@ -419,7 +419,7 @@
 
   $ hg uncommit
   abort: cannot uncommit merge changeset
-  [255]
+  [10]
 
   $ hg status
   $ hg log -G -T '{rev}:{node} {desc}' --hidden
@@ -585,12 +585,12 @@
   $ hg uncommit emptydir
   abort: cannot uncommit "emptydir"
   (file was untracked in working directory parent)
-  [255]
+  [10]
 
   $ cd emptydir
   $ hg uncommit .
   abort: cannot uncommit "emptydir"
   (file was untracked in working directory parent)
-  [255]
+  [10]
   $ hg status
   $ cd ..
--- a/tests/test-up-local-change.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-up-local-change.t	Thu Mar 18 18:24:59 2021 -0400
@@ -175,7 +175,7 @@
   $ hg up 1
   b: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
   $ rm b
 
 test conflicting untracked ignored file
@@ -195,7 +195,7 @@
   $ hg up 'desc("add ignored file")'
   ignored: untracked file differs
   abort: untracked files in working directory differ from files in requested revision
-  [255]
+  [20]
 
 test a local add
 
--- a/tests/test-update-branches.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-update-branches.t	Thu Mar 18 18:24:59 2021 -0400
@@ -324,7 +324,7 @@
   $ hg up -q 4
   abort: conflicting changes
   (commit or update --clean to discard changes)
-  [255]
+  [20]
   $ hg up -m 4
   merging a
   warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
--- a/tests/test-upgrade-repo.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-upgrade-repo.t	Thu Mar 18 18:24:59 2021 -0400
@@ -21,7 +21,7 @@
   > EOF
 
   $ hg -R no-revlogv1 debugupgraderepo
-  abort: cannot upgrade repository; requirement missing: revlogv1
+  abort: cannot upgrade repository; missing a revlog version
   [255]
 
 Cannot upgrade shared repositories
@@ -58,9 +58,9 @@
   generaldelta:       yes
   share-safe:          no
   sparserevlog:       yes
-  sidedata:            no
   persistent-nodemap:  no
   copies-sdc:          no
+  revlog-v2:           no
   plain-cl-delta:     yes
   compression:        zlib
   compression-level:  default
@@ -71,9 +71,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -84,9 +84,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -97,9 +97,9 @@
   [formatvariant.name.uptodate|generaldelta:      ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|share-safe:        ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|sparserevlog:      ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
-  [formatvariant.name.uptodate|sidedata:          ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|copies-sdc:        ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
+  [formatvariant.name.uptodate|revlog-v2:         ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|plain-cl-delta:    ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|compression:       ][formatvariant.repo.uptodate| zlib][formatvariant.config.default|   zlib][formatvariant.default|    zlib]
   [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
@@ -138,12 +138,6 @@
    {
     "config": false,
     "default": false,
-    "name": "sidedata",
-    "repo": false
-   },
-   {
-    "config": false,
-    "default": false,
     "name": "persistent-nodemap",
     "repo": false
    },
@@ -154,6 +148,12 @@
     "repo": false
    },
    {
+    "config": false,
+    "default": false,
+    "name": "revlog-v2",
+    "repo": false
+   },
+   {
     "config": true,
     "default": true,
     "name": "plain-cl-delta",
@@ -303,9 +303,9 @@
   generaldelta:        no
   share-safe:          no
   sparserevlog:        no
-  sidedata:            no
   persistent-nodemap:  no
   copies-sdc:          no
+  revlog-v2:           no
   plain-cl-delta:     yes
   compression:        zlib
   compression-level:  default
@@ -316,9 +316,9 @@
   generaldelta:        no    yes     yes
   share-safe:          no     no      no
   sparserevlog:        no    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -329,9 +329,9 @@
   generaldelta:        no     no     yes
   share-safe:          no     no      no
   sparserevlog:        no     no     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -342,9 +342,9 @@
   [formatvariant.name.mismatchdefault|generaldelta:      ][formatvariant.repo.mismatchdefault|  no][formatvariant.config.special|     no][formatvariant.default|     yes]
   [formatvariant.name.uptodate|share-safe:        ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.mismatchdefault|sparserevlog:      ][formatvariant.repo.mismatchdefault|  no][formatvariant.config.special|     no][formatvariant.default|     yes]
-  [formatvariant.name.uptodate|sidedata:          ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|copies-sdc:        ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
+  [formatvariant.name.uptodate|revlog-v2:         ][formatvariant.repo.uptodate|  no][formatvariant.config.default|     no][formatvariant.default|      no]
   [formatvariant.name.uptodate|plain-cl-delta:    ][formatvariant.repo.uptodate| yes][formatvariant.config.default|    yes][formatvariant.default|     yes]
   [formatvariant.name.uptodate|compression:       ][formatvariant.repo.uptodate| zlib][formatvariant.config.default|   zlib][formatvariant.default|    zlib]
   [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
@@ -632,11 +632,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for * (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ ls -1 .hg/ | grep upgradebackup
   [1]
@@ -679,11 +677,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
 
 Check that the repo still works fine
@@ -759,11 +755,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ hg verify
   checking changesets
@@ -810,11 +804,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ hg verify
   checking changesets
@@ -861,11 +853,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ hg verify
   checking changesets
@@ -919,11 +909,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ hg verify
   checking changesets
@@ -978,11 +966,9 @@
   data fully upgraded in a temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
-  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   replacing store...
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
-  removing old repository content $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   $ hg verify
   checking changesets
@@ -1299,9 +1285,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zstd   zlib    zlib
   compression-level:  default default default
@@ -1335,9 +1321,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib
   compression-level:  default default default
@@ -1374,9 +1360,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zstd   zstd    zlib
   compression-level:  default default default
@@ -1400,10 +1386,11 @@
   upgrade will perform the following actions:
   
   requirements
-     preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
-     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
-     added: exp-sidedata-flag (zstd !)
-     added: exp-sidedata-flag, sparserevlog (no-zstd !)
+     preserved: dotencode, fncache, generaldelta, store (no-zstd !)
+     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd !)
+     removed: revlogv1
+     added: exp-revlogv2.2, exp-sidedata-flag (zstd !)
+     added: exp-revlogv2.2, exp-sidedata-flag, sparserevlog (no-zstd !)
   
   processed revlogs:
     - all-filelogs
@@ -1417,20 +1404,20 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:          yes     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zlib (zstd !)
   compression-level:  default default default
   $ cat .hg/requires
   dotencode
+  exp-revlogv2.2
   exp-sidedata-flag
   fncache
   generaldelta
   revlog-compression-zstd (zstd !)
-  revlogv1
   sparserevlog
   store
   $ hg debugsidedata -c 0
@@ -1444,9 +1431,10 @@
   upgrade will perform the following actions:
   
   requirements
-     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
-     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
-     removed: exp-sidedata-flag
+     preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
+     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd !)
+     removed: exp-revlogv2.2, exp-sidedata-flag
+     added: revlogv1
   
   processed revlogs:
     - all-filelogs
@@ -1460,9 +1448,9 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:            no     no      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:           no     no      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zlib (zstd !)
@@ -1487,9 +1475,10 @@
   upgrade will perform the following actions:
   
   requirements
-     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
-     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
-     added: exp-sidedata-flag
+     preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
+     preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd !)
+     removed: revlogv1
+     added: exp-revlogv2.2, exp-sidedata-flag
   
   processed revlogs:
     - all-filelogs
@@ -1503,20 +1492,20 @@
   generaldelta:       yes    yes     yes
   share-safe:          no     no      no
   sparserevlog:       yes    yes     yes
-  sidedata:           yes    yes      no
   persistent-nodemap:  no     no      no
   copies-sdc:          no     no      no
+  revlog-v2:          yes    yes      no
   plain-cl-delta:     yes    yes     yes
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zlib (zstd !)
   compression-level:  default default default
   $ cat .hg/requires
   dotencode
+  exp-revlogv2.2
   exp-sidedata-flag
   fncache
   generaldelta
   revlog-compression-zstd (zstd !)
-  revlogv1
   sparserevlog
   store
   $ hg debugsidedata -c 0
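
Aside: the requirements churn above boils down to set arithmetic: enabling
side-data now migrates the store from revlogv1 to the experimental revlog v2
format, and disabling it migrates back. A sketch using the requirement names
from the expected output (the helper itself is illustrative, not the upgrade
engine's actual code):

  REVLOGV1 = b'revlogv1'
  REVLOGV2 = b'exp-revlogv2.2'
  SIDEDATA = b'exp-sidedata-flag'

  def toggle_sidedata(requirements, enable):
      reqs = set(requirements)
      if enable:
          reqs -= {REVLOGV1}
          reqs |= {REVLOGV2, SIDEDATA}
      else:
          reqs -= {REVLOGV2, SIDEDATA}
          reqs |= {REVLOGV1}
      return reqs

  before = {b'dotencode', b'fncache', b'generaldelta', b'revlogv1',
            b'sparserevlog', b'store'}
  after = toggle_sidedata(before, enable=True)
  assert REVLOGV2 in after and REVLOGV1 not in after
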
--- a/tests/test-url-download.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-url-download.t	Thu Mar 18 18:24:59 2021 -0400
@@ -34,6 +34,8 @@
   $ hg debugdownload ./null.txt
   1 0000000000000000000000000000000000000000
 
+  $ cat ../error.log
+
 Test largefile URL
 ------------------
 
@@ -66,3 +68,5 @@
   $ hg debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8"
   1 0000000000000000000000000000000000000000
   $ cd ..
+
+  $ cat error.log
--- a/tests/test-win32text.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-win32text.t	Thu Mar 18 18:24:59 2021 -0400
@@ -38,7 +38,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.crlf hook failed
-  [255]
+  [40]
 
   $ mv .hg/hgrc .hg/hgrc.bak
 
@@ -77,7 +77,7 @@
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.crlf hook failed
-  [255]
+  [40]
 
   $ mv .hg/hgrc.bak .hg/hgrc
   $ echo hello > f
@@ -109,7 +109,7 @@
   transaction abort!
   rollback completed
   abort: pretxncommit.crlf hook failed
-  [255]
+  [40]
   $ hg revert -a
   forgetting d/f2
   $ rm d/f2
@@ -286,7 +286,7 @@
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.crlf hook failed
-  [255]
+  [40]
 
   $ hg log -v
   changeset:   5:f0b1c8d75fce
--- a/tests/test-wireproto-command-capabilities.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-wireproto-command-capabilities.t	Thu Mar 18 18:24:59 2021 -0400
@@ -150,7 +150,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -190,7 +190,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -223,7 +223,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   cbor> [
     {
       b'apibase': b'api/',
@@ -484,7 +484,7 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
   s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
--- a/tests/test-wireproto-content-redirects.t	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/test-wireproto-content-redirects.t	Thu Mar 18 18:24:59 2021 -0400
@@ -66,9 +66,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2308\r\n
+  s>     Content-Length: 2289\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-a is compatible) (tls1.2 !)
   (remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
   sending capabilities command
@@ -396,9 +396,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2335\r\n
+  s>     Content-Length: 2316\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-a is compatible)
   (remote redirect target target-b uses unsupported protocol: unknown)
   sending capabilities command
@@ -731,9 +731,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2295\r\n
+  s>     Content-Length: 2276\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (redirect target target-bad-tls requires SNI, which is unsupported)
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
@@ -1055,9 +1055,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-cbor\r\n
-  s>     Content-Length: 2301\r\n
+  s>     Content-Length: 2282\r\n
   s>     \r\n
-  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xf7batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
   sending capabilities command
   s> setsockopt(6, 1, 1) -> None (?)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/common.sh	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,7 @@
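+# mkcommit NAME [hg-ci-options...]: write NAME into a file of the same name,
+# `hg add` it, and commit it with NAME as the message; any extra arguments
+# are passed through to `hg ci`.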
+mkcommit() {
+   name="$1"
+   shift
+   echo "$name" > "$name"
+   hg add "$name"
+   hg ci -m "$name" "$@"
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-2.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,50 @@
+# coding: utf8
+# ext-sidedata-2.py - small extension to test (differently) the sidedata logic
+#
+# Simulates a client for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
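+# A sidedata computer receives (repo, revlog, rev, sidedata) plus an optional
+# precomputed revision text, and returns a new sidedata dict for that
+# revision.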
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+    return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sha256 = hashlib.sha256(text).digest()
+    sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+    return sidedata
+
+
+def reposetup(ui, repo):
+    # Sidedata keys happen to be the same as the categories, which makes
+    # testing easier.
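+    # register_sidedata_computer(kind, category, keys, computer) attaches the
+    # computer to every revlog of the given kind; the tuple lists the sidedata
+    # keys that the computer produces.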
+    for kind in (b'changelog', b'manifest', b'filelog'):
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST1,
+            (sidedatamod.SD_TEST1,),
+            compute_sidedata_1,
+        )
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST2,
+            (sidedatamod.SD_TEST2,),
+            compute_sidedata_2,
+        )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-3.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,88 @@
+# coding: utf8
+# ext-sidedata-3.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a client for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial import (
+    extensions,
+    revlog,
+)
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+    return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sha256 = hashlib.sha256(text).digest()
+    sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+    return sidedata
+
+
+def compute_sidedata_3(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sha384 = hashlib.sha384(text).digest()
+    sidedata[sidedatamod.SD_TEST3] = struct.pack('>48s', sha384)
+    return sidedata
+
+
+def wrapaddrevision(
+    orig, self, text, transaction, link, p1, p2, *args, **kwargs
+):
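+    # Inject both test categories into the sidedata at write time, chaining
+    # the two computers on top of whatever the caller provided.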
+    if kwargs.get('sidedata') is None:
+        kwargs['sidedata'] = {}
+    sd = kwargs['sidedata']
+    sd = compute_sidedata_1(None, self, None, sd, text=text)
+    kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)
+    return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
+
+
+def extsetup(ui):
+    extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
+
+
+def reposetup(ui, repo):
+    # Sidedata keys happen to be the same as the categories, which makes
+    # testing easier.
+    for kind in (b'changelog', b'manifest', b'filelog'):
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST1,
+            (sidedatamod.SD_TEST1,),
+            compute_sidedata_1,
+        )
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST2,
+            (sidedatamod.SD_TEST2,),
+            compute_sidedata_2,
+        )
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST3,
+            (sidedatamod.SD_TEST3,),
+            compute_sidedata_3,
+        )
+    repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
+    repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-4.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,19 @@
+# coding: utf8
+# ext-sidedata-4.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a server for a complex sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.revlogutils import sidedata
+
+
+def reposetup(ui, repo):
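+    # The simulated server side only declares which sidedata categories it
+    # wants from peers; it registers no computers of its own.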
+    repo.register_wanted_sidedata(sidedata.SD_TEST2)
+    repo.register_wanted_sidedata(sidedata.SD_TEST3)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/ext-sidedata-5.py	Thu Mar 18 18:24:59 2021 -0400
@@ -0,0 +1,81 @@
+# coding: utf8
+# ext-sidedata-5.py - small extension to test (differently still) the sidedata
+# logic
+#
+# Simulates a server for a simple sidedata exchange.
+#
+# Copyright 2021 Raphaël Gomès <rgomes@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial import (
+    extensions,
+    revlog,
+)
+
+
+from mercurial.revlogutils import sidedata as sidedatamod
+
+
+def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+    return sidedata
+
+
+def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
+    sidedata = sidedata.copy()
+    if text is None:
+        text = revlog.revision(rev)
+    sha256 = hashlib.sha256(text).digest()
+    sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+    return sidedata
+
+
+def reposetup(ui, repo):
+    # Sidedata keys happen to be the same as the categories, which makes
+    # testing easier.
+    for kind in (b'changelog', b'manifest', b'filelog'):
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST1,
+            (sidedatamod.SD_TEST1,),
+            compute_sidedata_1,
+        )
+        repo.register_sidedata_computer(
+            kind,
+            sidedatamod.SD_TEST2,
+            (sidedatamod.SD_TEST2,),
+            compute_sidedata_2,
+        )
+
+    # Also declare which sidedata categories this repo wants exchanged with
+    # its peers.
+    repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
+    repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
+
+
+def wrapaddrevision(
+    orig, self, text, transaction, link, p1, p2, *args, **kwargs
+):
+    if kwargs.get('sidedata') is None:
+        kwargs['sidedata'] = {}
+    sd = kwargs['sidedata']
+    # let's store some arbitrary data just for testing
+    # text length
+    sd[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
+    # and sha2 hashes
+    sha256 = hashlib.sha256(text).digest()
+    sd[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
+    return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
+
+
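+# Wrapping revlog.revlog.addrevision on the class makes every revlog write in
+# the process record the extra test sidedata.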
+def extsetup(ui):
+    extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
--- a/tests/testlib/ext-sidedata.py	Sat Mar 13 02:09:23 2021 -0500
+++ b/tests/testlib/ext-sidedata.py	Thu Mar 18 18:24:59 2021 -0400
@@ -1,6 +1,6 @@
 # ext-sidedata.py - small extension to test the sidedata logic
 #
-# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net)
+# Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
@@ -40,19 +40,21 @@
     return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
 
 
-def wraprevision(orig, self, nodeorrev, *args, **kwargs):
-    text = orig(self, nodeorrev, *args, **kwargs)
+def wrap_revisiondata(orig, self, nodeorrev, *args, **kwargs):
+    text, sd = orig(self, nodeorrev, *args, **kwargs)
     if getattr(self, 'sidedatanocheck', False):
-        return text
+        return text, sd
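+    # Only revlog version 2 carries sidedata; skip the checks otherwise.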
+    if self.version & 0xFFFF != 2:
+        return text, sd
     if nodeorrev != nullrev and nodeorrev != nullid:
-        sd = self.sidedata(nodeorrev)
-        if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
+        cat1 = sd.get(sidedata.SD_TEST1)
+        if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
             raise RuntimeError('text size mismatch')
-        expected = sd[sidedata.SD_TEST2]
+        expected = sd.get(sidedata.SD_TEST2)
         got = hashlib.sha256(text).digest()
-        if got != expected:
+        if expected is not None and got != expected:
             raise RuntimeError('sha256 mismatch')
-    return text
+    return text, sd
 
 
 def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
@@ -81,7 +83,14 @@
 
 def extsetup(ui):
     extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
-    extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
+    extensions.wrapfunction(revlog.revlog, '_revisiondata', wrap_revisiondata)
     extensions.wrapfunction(
         upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
     )
+
+
+def reposetup(ui, repo):
+    # We don't register sidedata computers because these tests don't need
+    # them; we only declare which categories we want exchanged.
+    repo.register_wanted_sidedata(sidedata.SD_TEST1)
+    repo.register_wanted_sidedata(sidedata.SD_TEST2)